// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. The core code calls all configuration-specific functions
 * through the function pointers in struct eqos_ops, with the active
 * struct eqos_config being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 * stm32:
 *    STMicroelectronics STM32 MPUs (e.g. STM32MP1). Clock and reset handling
 *    differ from Tegra186; the PHY reset is driven through an optional GPIO.
 * imx:
 *    NXP i.MX SoCs. Clocks, resets and pad calibration are managed outside
 *    this driver, so the corresponding callbacks are no-ops.
 */
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV		1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300		5
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)
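
/*
 * Worked example, assuming ARCH_DMA_MINALIGN == 64 (a typical cache-line
 * size on arm64): EQOS_DESCRIPTORS_SIZE = ALIGN(8 * 16, 64) = 128 bytes,
 * EQOS_MAX_PACKET_SIZE = ALIGN(1568, 64) = 1600 bytes, and
 * EQOS_RX_BUFFER_SIZE = 4 * 1600 = 6400 bytes.
 */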

/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, therefore descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

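/*
 * des3 flag bits in the "read" (driver-to-hardware) descriptor format:
 * OWN hands the descriptor to the DMA, FD/LD mark the first/last
 * descriptor of a frame, and BUF1V marks des0 as a valid buffer address.
 */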
#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	int phyaddr;
	u32 max_speed;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are EQOS_MAX_PACKET_SIZE bytes
 * large, so they are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_inval_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_flush_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

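/*
 * Both MDIO accessors below follow the same hardware sequence: wait for
 * the GB (busy) bit to clear, program the PA/RDA/CR/GOC fields together
 * with GB into MAC_MDIO_ADDRESS, delay for the configured mdio_wait
 * period, then poll GB again before touching MAC_MDIO_DATA.
 */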
static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}
	debug("%s: OK\n", __func__);

	return 0;
}

static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	/* TODO: retrieve from CSR clock */
	return 100 * 1000000;
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d",
			       ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d",
			       ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

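	/*
	 * Program the microsecond tick counter with (CSR clock in MHz) - 1,
	 * so that one counter tick corresponds to 1 us.
	 */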
	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */
	writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
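	/*
	 * Worked example: a 4 KiB TX FIFO is advertised as tx_fifo_sz = 5
	 * (128 << 5 == 4096), which yields tqs = 4096 / 256 - 1 = 15.
	 */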

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control to the space
		 * needed for a minimum of 2 frames, i.e. (1500 * 2) = 3000
		 * bytes.
		 *
		 * Set Threshold for Deactivating Flow Control to the space
		 * needed for a minimum of 1 frame (frame size 1500 bytes) in
		 * the receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
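	/*
	 * Example: with tqs = 15 (4 KiB FIFO), pbl = 16, i.e. a maximum
	 * burst of 16 * 8 * 16 = 2048 bytes, exactly half the FIFO.
	 */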
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
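	/*
	 * Per the Synopsys databook, RD_OSR_LMT is encoded as (limit - 1),
	 * so the value 2 permits up to three outstanding AXI read requests;
	 * EAME enables extended addressing, and the BLEN bits allow burst
	 * lengths of 4, 8 and 16 beats.
	 */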
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}
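	/*
	 * Each RX descriptor above was handed to the DMA with OWN set and
	 * BUF1V marking des0 as a valid buffer pointer; the DMA clears OWN
	 * again once it has written a received frame back to the descriptor.
	 */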

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

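	/*
	 * Advance the channel tail pointer past the newly filled descriptor
	 * so the DMA engine fetches and transmits it.
	 */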
	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

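/*
 * Receive path: peek at the descriptor at rx_desc_idx; if the hardware still
 * owns it, no packet is pending. Otherwise return a pointer into the RX DMA
 * buffer area and the length the hardware wrote back into the descriptor.
 */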
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
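	/* Bits 14:0 of the written-back des3 hold the packet length */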
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

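/*
 * Called once the caller is done with the packet from eqos_recv(): rebuild
 * the descriptor, hand it back to the hardware by setting OWN, and advance
 * the tail pointer so the DMA can reuse the buffer.
 */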
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);

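	/*
	 * Clear the buffer address and flush the descriptor first so the
	 * hardware cannot observe a stale buffer pointer through a partially
	 * updated descriptor, then invalidate the buffer before handing it
	 * back to the DMA.
	 */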
	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

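/*
 * Allocate the bus-independent resources: one contiguous, cache-aligned
 * descriptor block holding the TX descriptors followed by the RX
 * descriptors, plus the TX bounce buffer, the RX buffer area, and a scratch
 * RX packet buffer.
 */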
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

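	/*
	 * Invalidate the whole RX buffer area up front so that no dirty
	 * cache lines covering it can later be evicted on top of
	 * DMA-written packet data.
	 */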
	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* Board-specific Ethernet interface initialization; boards override this weak stub */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

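	/*
	 * Default to PHY address auto-detection (-1); if the controller node
	 * has a phy-handle, take the PHY address and the optional reset GPIO
	 * from the referenced PHY node instead.
	 */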
	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* search "reset-gpios" in phy node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("No PHY reset GPIO provided: %d", ret);

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_RGMII;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_imx(struct udevice *dev)
{
	return 0;
}

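/*
 * Common probe: map the register regions, allocate the bus-independent
 * resources, call the configuration-specific resource hook, and register an
 * MDIO bus for the PHY.
 */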
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

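	/*
	 * With CONFIG_DM_ETH_PHY, reuse an MDIO bus that another device has
	 * already registered for this controller; otherwise allocate and
	 * register our own bus backed by eqos_mdio_read()/eqos_mdio_write().
	 */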
#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

1996
1997static int eqos_remove(struct udevice *dev)
1998{
1999 struct eqos_priv *eqos = dev_get_priv(dev);
2000
2001 debug("%s(dev=%p):\n", __func__, dev);
2002
2003 mdio_unregister(eqos->mii);
2004 mdio_free(eqos->mii);
Christophe Roullierac2d4ef2019-05-17 15:08:44 +02002005 eqos->config->ops->eqos_remove_resources(dev);
2006
Stephen Warrenba4dfef2016-10-21 14:46:47 -06002007 eqos_probe_resources_core(dev);
2008
2009 debug("%s: OK\n", __func__);
2010 return 0;
2011}
2012
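/* Driver-model Ethernet operations */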
static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
};

static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_tegra186,
	.eqos_flush_desc = eqos_flush_desc_tegra186,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

struct eqos_config eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};

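/* Each entry's .data selects the matching eqos_config above */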
static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{
		.compatible = "snps,dwmac-4.20a",
		.data = (ulong)&eqos_stm32_config
	},
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};