// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 */
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration; /* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4]; /* 0x004 */
	uint32_t q0_tx_flow_ctrl; /* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4]; /* 0x074 */
	uint32_t rx_flow_ctrl; /* 0x090 */
	uint32_t unused_094; /* 0x094 */
	uint32_t txq_prty_map0; /* 0x098 */
	uint32_t unused_09c; /* 0x09c */
	uint32_t rxq_ctrl0; /* 0x0a0 */
	uint32_t unused_0a4; /* 0x0a4 */
	uint32_t rxq_ctrl2; /* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4]; /* 0x0ac */
	uint32_t us_tic_counter; /* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4]; /* 0x0e0 */
	uint32_t hw_feature0; /* 0x11c */
	uint32_t hw_feature1; /* 0x120 */
	uint32_t hw_feature2; /* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4]; /* 0x128 */
	uint32_t mdio_address; /* 0x200 */
	uint32_t mdio_data; /* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4]; /* 0x208 */
	uint32_t address0_high; /* 0x300 */
	uint32_t address0_low; /* 0x304 */
};
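
/*
 * Note on the layout technique above: the unused_* arrays pad the struct so
 * that each named field lands at its documented register offset. For
 * example, unused_004 spans (0x070 - 0x004) / 4 = 27 words, i.e. offsets
 * 0x004..0x06f, which places q0_tx_flow_ctrl exactly at 0x070.
 */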

#define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23)
#define EQOS_MAC_CONFIGURATION_CST BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS BIT(20)
#define EQOS_MAC_CONFIGURATION_WD BIT(19)
#define EQOS_MAC_CONFIGURATION_JD BIT(17)
#define EQOS_MAC_CONFIGURATION_JE BIT(16)
#define EQOS_MAC_CONFIGURATION_PS BIT(15)
#define EQOS_MAC_CONFIGURATION_FES BIT(14)
#define EQOS_MAC_CONFIGURATION_DM BIT(13)
#define EQOS_MAC_CONFIGURATION_LM BIT(12)
#define EQOS_MAC_CONFIGURATION_TE BIT(1)
#define EQOS_MAC_CONFIGURATION_RE BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5
#define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1
#define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode; /* 0xd00 */
	uint32_t unused_d04; /* 0xd04 */
	uint32_t txq0_debug; /* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4]; /* 0xd0c */
	uint32_t txq0_quantum_weight; /* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4]; /* 0xd1c */
	uint32_t rxq0_operation_mode; /* 0xd30 */
	uint32_t unused_d34; /* 0xd34 */
	uint32_t rxq0_debug; /* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode; /* 0x1000 */
	uint32_t sysbus_mode; /* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4]; /* 0x1008 */
	uint32_t ch0_control; /* 0x1100 */
	uint32_t ch0_tx_control; /* 0x1104 */
	uint32_t ch0_rx_control; /* 0x1108 */
	uint32_t unused_110c; /* 0x110c */
	uint32_t ch0_txdesc_list_haddress; /* 0x1110 */
	uint32_t ch0_txdesc_list_address; /* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress; /* 0x1118 */
	uint32_t ch0_rxdesc_list_address; /* 0x111c */
	uint32_t ch0_txdesc_tail_pointer; /* 0x1120 */
	uint32_t unused_1124; /* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer; /* 0x1128 */
	uint32_t ch0_txdesc_ring_length; /* 0x112c */
	uint32_t ch0_rxdesc_ring_length; /* 0x1130 */
};

#define EQOS_DMA_MODE_SWR BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf
#define EQOS_DMA_SYSBUS_MODE_EAME BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl; /* 0x8800 */
	uint32_t auto_cal_config; /* 0x8804 */
	uint32_t unused_8808; /* 0x8808 */
	uint32_t auto_cal_status; /* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS 4
#define EQOS_DESCRIPTOR_SIZE (EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX 4
#define EQOS_DESCRIPTORS_RX 4
#define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE ALIGN(EQOS_DESCRIPTORS_NUM * \
				    EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

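/*
 * For illustration only: with an assumed 64-byte cache line
 * (ARCH_DMA_MINALIGN == 64, common on ARMv8; the real value is
 * configuration-dependent), the definitions above work out to:
 *
 *   EQOS_DESCRIPTOR_SIZE  = 4 * 4             = 16 bytes
 *   EQOS_DESCRIPTORS_NUM  = 4 + 4             = 8
 *   EQOS_DESCRIPTORS_SIZE = ALIGN(8 * 16, 64) = 128 bytes
 *   EQOS_MAX_PACKET_SIZE  = ALIGN(1568, 64)   = 1600 bytes
 *   EQOS_RX_BUFFER_SIZE   = 4 * 1600          = 6400 bytes
 *
 * Four 16-byte descriptors then share each 64-byte cache line, which is
 * exactly the hazard the warning below describes.
 */
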
/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers; descriptors written by the hardware may
 * therefore be discarded. Architectures with full IO coherence, such as x86,
 * do not experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY, which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN BIT(31)
#define EQOS_DESC3_FD BIT(29)
#define EQOS_DESC3_LD BIT(28)
#define EQOS_DESC3_BUF1V BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

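/*
 * Sketch of how a configuration is expected to wire these callbacks up
 * through struct eqos_config. The *_tegra186 callbacks are defined below;
 * the actual ops/config tables appear later in the driver, so treat this
 * fragment as illustrative rather than authoritative:
 *
 *   static struct eqos_ops eqos_tegra186_ops = {
 *           .eqos_inval_desc = eqos_inval_desc_tegra186,
 *           .eqos_flush_desc = eqos_flush_desc_tegra186,
 *           .eqos_inval_buffer = eqos_inval_buffer_tegra186,
 *           .eqos_flush_buffer = eqos_flush_buffer_tegra186,
 *           .eqos_start_clks = eqos_start_clks_tegra186,
 *           ...
 *   };
 */
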
struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	int phyaddr;
	u32 max_speed;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_inval_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_flush_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

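/*
 * Illustrative composition of the MAC_MDIO_ADDRESS register as programmed
 * by the accessors below, for a hypothetical Clause 22 read of register 2
 * (PHY ID1) on PHY address 0 with a 250-300 MHz CSR clock:
 *
 *   val = (0 << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |      PA:  PHY address
 *         (2 << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |     RDA: register address
 *         (EQOS_MAC_MDIO_ADDRESS_CR_250_300 <<
 *          EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |           CR:  CSR clock range
 *         (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
 *          EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |          GOC: read command
 *         EQOS_MAC_MDIO_ADDRESS_GB;                    GB:  start/busy
 *
 * GB is set by software to start the transaction and cleared by hardware on
 * completion, which is what eqos_mdio_wait_idle() polls for.
 */
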
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}
	debug("%s: OK\n", __func__);

	return 0;
}

static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
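	/*
	 * Byte-order illustration (hypothetical MAC 02:11:22:33:44:55):
	 * address0_high = 0x00005544 and address0_low = 0x33221102, i.e. the
	 * first address octet lands in the least significant byte of
	 * address0_low.
	 */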
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev->req_seq, pdata->enetaddr);
#endif
	return !is_valid_ethaddr(pdata->enetaddr);
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

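	/*
	 * Program the 1 us tick counter from the tick clock rate; e.g. a
	 * hypothetical 125 MHz tick clock gives val = 125 - 1 = 124.
	 */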
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */
	writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
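	/*
	 * Worked example with hypothetical values: a 16 KiB TX FIFO is
	 * advertised as tx_fifo_sz = log2(16384 / 128) = 7, so
	 * tqs = (128 << 7) / 256 - 1 = 63, i.e. (tqs + 1) * 256 = 16 KiB.
	 */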

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control to the space
		 * needed for a minimum of 2 frames, i.e. (1500 * 2) = 3000
		 * bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to the space
		 * needed for a minimum of 1 frame (frame size 1500 bytes) in
		 * the receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of the FIFO
			 * size limit; overflow may therefore occur in spite of
			 * this.
			 */
			rfd = 0x3; /* Full-3K */
			rfa = 0x1; /* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6; /* Full-4K */
			rfa = 0xa; /* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6; /* Full-4K */
			rfa = 0x12; /* Full-10K */
		} else {
			rfd = 0x6; /* Full-4K */
			rfa = 0x1E; /* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
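	/*
	 * Continuing the hypothetical 16 KiB FIFO example above: tqs = 63
	 * would give pbl = 64, which the clamp above caps at 32.
	 */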
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						(i * EQOS_MAX_PACKET_SIZE),
						EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

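/*
 * Transmit a single packet. The driver uses one bounce buffer for TX: the
 * packet is copied and flushed, the next descriptor in the ring is handed
 * to the hardware, and the TX tail pointer is advanced to kick the DMA.
 */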
static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

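	/*
	 * The DMA fetches descriptors up to, but not including, the one the
	 * tail pointer points at, so write the address of the descriptor
	 * following the one just queued.
	 */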
	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

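/*
 * Receive a single packet, zero-copy: if the DMA has released the current
 * RX descriptor (OWN clear), return a pointer directly into the RX DMA
 * buffer. The descriptor itself is recycled later via eqos_free_pkt().
 */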
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

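/*
 * Return a packet buffer obtained from eqos_recv() to the hardware: refill
 * the corresponding RX descriptor, hand it back to the DMA via the tail
 * pointer, and advance the software ring index.
 */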
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);

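	/*
	 * Clear the buffer address and flush the descriptor while it is
	 * still software-owned, then invalidate the packet buffer before
	 * refilling; this ordering should prevent a dirty cache line from
	 * being evicted over data the DMA writes once it owns the buffer
	 * again.
	 */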
	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

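/*
 * Allocate the DMA descriptor rings and packet buffers shared by every
 * configuration; the error paths below unwind the allocations in reverse
 * order.
 */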
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

1732static int eqos_probe_resources_tegra186(struct udevice *dev)
1733{
1734 struct eqos_priv *eqos = dev_get_priv(dev);
1735 int ret;
1736
1737 debug("%s(dev=%p):\n", __func__, dev);
1738
1739 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
1740 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001741 pr_err("reset_get_by_name(rst) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001742 return ret;
1743 }
1744
1745 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
1746 &eqos->phy_reset_gpio,
1747 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
1748 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001749 pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001750 goto err_free_reset_eqos;
1751 }
1752
1753 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
1754 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001755 pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001756 goto err_free_gpio_phy_reset;
1757 }
1758
1759 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
1760 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001761 pr_err("clk_get_by_name(master_bus) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001762 goto err_free_clk_slave_bus;
1763 }
1764
1765 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
1766 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001767 pr_err("clk_get_by_name(rx) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001768 goto err_free_clk_master_bus;
1769 }
1770
1771 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
1772 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001773 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001774 goto err_free_clk_rx;
1775 return ret;
1776 }
1777
1778 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
1779 if (ret) {
Masahiro Yamada9b643e32017-09-16 14:10:41 +09001780 pr_err("clk_get_by_name(tx) failed: %d", ret);
Stephen Warrenba4dfef2016-10-21 14:46:47 -06001781 goto err_free_clk_ptp_ref;
1782 }
1783
1784 debug("%s: OK\n", __func__);
1785 return 0;
1786
1787err_free_clk_ptp_ref:
1788 clk_free(&eqos->clk_ptp_ref);
1789err_free_clk_rx:
1790 clk_free(&eqos->clk_rx);
1791err_free_clk_master_bus:
1792 clk_free(&eqos->clk_master_bus);
1793err_free_clk_slave_bus:
1794 clk_free(&eqos->clk_slave_bus);
1795err_free_gpio_phy_reset:
1796 dm_gpio_free(dev, &eqos->phy_reset_gpio);
1797err_free_reset_eqos:
1798 reset_free(&eqos->reset_ctl);
1799
1800 debug("%s: returns %d\n", __func__, ret);
1801 return ret;
1802}
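
/*
 * For reference, the Tegra186 configuration expects DT resources named to
 * match the *_get_by_name() calls above, roughly along these lines (an
 * illustrative sketch, not copied from any shipping device tree):
 *
 *	ethernet@2490000 {
 *		compatible = "nvidia,tegra186-eqos";
 *		reset-names = "eqos";
 *		clock-names = "slave_bus", "master_bus", "rx", "ptp_ref", "tx";
 *		phy-reset-gpios = <&gpio_ctlr 42 GPIO_ACTIVE_LOW>;
 *	};
 */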

/* Board-specific Ethernet interface initialization; boards may override */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get the ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* search "reset-gpios" in phy node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("gpio_request_by_name(phy reset) not provided: %d",
				ret);

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

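/* Derive the PHY interface mode from the standard DT "phy-mode" property */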
static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

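/*
 * The i.MX configuration has no per-device clocks or resets to claim here;
 * just validate the PHY interface mode from DT.
 */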
static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
	/*
	 * eqos is used below unconditionally, so the declaration must stay
	 * outside the CONFIG_CLK guard.
	 */
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_imx(struct udevice *dev)
{
	return 0;
}

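/*
 * Common probe: map the register regions, allocate core and then
 * configuration-specific resources, and register (or look up) the MDIO bus
 * used to drive the PHY.
 */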
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources_config;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources_config:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};

static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_tegra186,
	.eqos_flush_desc = eqos_flush_desc_tegra186,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

/* static const like the other configs; nothing references it externally */
static const struct eqos_config eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{
		.compatible = "snps,dwmac-4.20a",
		.data = (ulong)&eqos_stm32_config
	},
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};