// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. The core code calls all configuration-specific functions
 * through function pointers, with the definition of those function pointers
 * being supplied by struct udevice_id eqos_ids[]'s .data field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus,
 *    an AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks,
 *    and supports a single RGMII PHY. This configuration also has SW control
 *    over all clock and reset signals to the HW block.
 * stm32:
 *    STMicroelectronics STM32 MPUs; uses the generic cache-maintenance
 *    helpers and STM32-specific clock/reset handling.
 * imx:
 *    NXP i.MX8M SoCs; clocking is handled outside this driver via the weak
 *    imx_get_eqos_csr_clk()/imx_eqos_txclk_set_rate() hooks.
 */
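
/*
 * For illustration only (a hypothetical sketch, not the authoritative match
 * table from later in this file): dispatch through struct udevice_id's .data
 * field looks roughly like this, with the core code retrieving the
 * configuration via dev_get_driver_data():
 *
 *	static const struct udevice_id eqos_ids[] = {
 *		{
 *			.compatible = "nvidia,tegra186-eqos",
 *			.data = (ulong)&eqos_tegra186_config
 *		},
 *		{ }
 *	};
 */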
#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV		1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300		5
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FEP		BIT(4)
#define EQOS_MTL_RXQ0_OPERATION_MODE_FUP		BIT(3)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */

#define EQOS_DESCRIPTOR_WORDS	4
#define EQOS_DESCRIPTOR_SIZE	(EQOS_DESCRIPTOR_WORDS * 4)
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_DESCRIPTORS_SIZE	ALIGN(EQOS_DESCRIPTORS_NUM * \
				      EQOS_DESCRIPTOR_SIZE, ARCH_DMA_MINALIGN)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

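/*
 * For a concrete sense of scale (assuming a typical ARCH_DMA_MINALIGN of 64):
 * each descriptor is 4 * 4 = 16 bytes, the 4 + 4 descriptor rings together
 * occupy ALIGN(8 * 16, 64) = 128 bytes, each RX buffer is ALIGN(1568, 64) =
 * 1600 bytes, and the whole RX buffer area is 4 * 1600 = 6400 bytes.
 */
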
/*
 * Warn if the cache-line size is larger than the descriptor size. In such
 * cases the driver will likely fail because the CPU needs to flush the cache
 * when requeuing RX buffers, therefore descriptors written by the hardware
 * may be discarded. Architectures with full IO coherence, such as x86, do not
 * experience this issue, and hence are excluded from this condition.
 *
 * This can be fixed by defining CONFIG_SYS_NONCACHED_MEMORY which will cause
 * the driver to allocate descriptors from a pool of non-cached memory.
 */
#if EQOS_DESCRIPTOR_SIZE < ARCH_DMA_MINALIGN
#if !defined(CONFIG_SYS_NONCACHED_MEMORY) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF) && !defined(CONFIG_X86)
#warning Cache line size is larger than descriptor size
#endif
#endif

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	int phyaddr;
	u32 max_speed;
	void *descs;
	struct eqos_desc *tx_descs;
	struct eqos_desc *rx_descs;
	int tx_desc_idx, rx_desc_idx;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(unsigned int num)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	return (void *)noncached_alloc(EQOS_DESCRIPTORS_SIZE,
				       EQOS_DESCRIPTOR_ALIGN);
#else
	return memalign(EQOS_DESCRIPTOR_ALIGN, EQOS_DESCRIPTORS_SIZE);
#endif
}

static void eqos_free_descs(void *descs)
{
#ifdef CONFIG_SYS_NONCACHED_MEMORY
	/* FIXME: noncached_alloc() has no opposite */
#else
	free(descs);
#endif
}

static void eqos_inval_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + EQOS_DESCRIPTOR_SIZE,
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_inval_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
#endif
}

static void eqos_flush_desc_tegra186(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	flush_cache((unsigned long)desc, EQOS_DESCRIPTOR_SIZE);
#endif
}

static void eqos_flush_desc_generic(void *desc)
{
#ifndef CONFIG_SYS_NONCACHED_MEMORY
	unsigned long start = rounddown((unsigned long)desc, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)desc + EQOS_DESCRIPTOR_SIZE,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
#endif
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
		EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
		(eqos->config->config_mac_mdio <<
		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
		(EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
		EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
		EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
		(eqos->config->config_mac_mdio <<
		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
		(EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
		EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

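/*
 * Usage sketch (illustrative only): these accessors are registered as the
 * read/write ops of a U-Boot MII bus, so a PHY register read goes through
 * the usual wrapper, e.g.:
 *
 *	int bmsr = eqos->mii->read(eqos->mii, eqos->phyaddr,
 *				   MDIO_DEVAD_NONE, MII_BMSR);
 */
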
static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck)) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_disable(&eqos->clk_ck);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);
	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}
	debug("%s: OK\n", __func__);

	return 0;
}

static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
		(plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
		(plat->enetaddr[2] << 16) |
		(plat->enetaddr[1] << 8) |
		(plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);
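	/*
	 * Worked example (illustrative only): for MAC address
	 * 00:11:22:33:44:55, enetaddr[] = {0x00, 0x11, 0x22, 0x33, 0x44,
	 * 0x55}, so the writes above program address0_high = 0x5544 and
	 * address0_low = 0x33221100.
	 */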

	return 0;
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);
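	/*
	 * Worked example (illustrative only): the counter holds the number of
	 * tick-clock cycles per microsecond, minus one, so a 125 MHz tick
	 * clock yields (125000000 / 1000000) - 1 = 124.
	 */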

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */
	writel(0x60, &eqos->mtl_regs->txq0_quantum_weight - 0x100);

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frame */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FEP |
		     EQOS_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
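	/*
	 * Worked example (illustrative only): a 16 KiB TX FIFO is advertised
	 * as tx_fifo_sz = log2(16384 / 128) = 7, so tqs = (128 << 7) / 256 -
	 * 1 = 63, which encodes 16384 bytes in the (n / 256) - 1 format.
	 */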

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set the threshold for activating flow control to leave
		 * space for a minimum of 2 frames, i.e. (1500 * 2) = 3000
		 * bytes.
		 *
		 * Set the threshold for deactivating flow control to leave
		 * space for a minimum of 1 frame (frame size 1500 bytes) in
		 * the receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO
			 * size limit; therefore, overflow may occur in spite
			 * of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			0x2 <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8);

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
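	/*
	 * Worked example (illustrative only): with a 16 KiB FIFO, tqs = 63,
	 * so pbl = 64 before clamping; each burst would then be 64 * 8 * 16 =
	 * 8192 bytes, exactly half the FIFO. The code clamps pbl to 32,
	 * keeping bursts safely under the limit.
	 */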
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
		EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
		EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, EQOS_DESCRIPTORS_SIZE);
	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = &(eqos->rx_descs[i]);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos->tx_descs, &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos->rx_descs, &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)&(eqos->rx_descs[(EQOS_DESCRIPTORS_RX - 1)]);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of the MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;
		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of the MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;
		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

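/*
 * Transmit one packet: copy it into the TX bounce buffer, fill in the next
 * TX descriptor, hand the descriptor to the hardware by setting the OWN bit
 * (ordered by a barrier so the rest of the descriptor is visible first),
 * ring the TX tail-pointer doorbell, then poll until the hardware clears
 * the OWN bit or a timeout expires.
 */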
static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = &(eqos->tx_descs[eqos->tx_desc_idx]);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	writel((ulong)(&(eqos->tx_descs[eqos->tx_desc_idx])),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

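/*
 * Check whether the hardware has handed back the current RX descriptor; if
 * it still owns it (OWN set), no packet is available yet. Otherwise return
 * a pointer into the RX DMA buffer and the received length from des3.
 */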
static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

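/*
 * Recycle a received packet's descriptor back to the hardware. The buffer
 * address in des0 is cleared and flushed before the descriptor is
 * rewritten, so the DMA engine never observes a half-updated descriptor;
 * only after a barrier is the OWN bit handed back, and the RX tail pointer
 * is then advanced to this descriptor.
 */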
static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = &(eqos->rx_descs[eqos->rx_desc_idx]);

	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

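/*
 * Allocate the DMA resources shared by all configurations: one combined
 * TX+RX descriptor ring, a single TX bounce buffer, one RX buffer slot per
 * RX descriptor, and a scratch packet buffer.
 */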
static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(EQOS_DESCRIPTORS_TX +
				       EQOS_DESCRIPTORS_RX);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	eqos->tx_descs = (struct eqos_desc *)eqos->descs;
	eqos->rx_descs = (eqos->tx_descs + EQOS_DESCRIPTORS_TX);
	debug("%s: tx_descs=%p, rx_descs=%p\n", __func__, eqos->tx_descs,
	      eqos->rx_descs);

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

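/*
 * Acquire the Tegra186-specific resources named in the device tree: the
 * "eqos" reset, the PHY reset GPIO, and the five clocks controlling the
 * block. On any failure, everything acquired so far is released again in
 * reverse order.
 */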
static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(eqos) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

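/*
 * For reference, a minimal sketch of the device tree node this function
 * expects; the unit address and the clock/reset/GPIO specifiers ("...")
 * are illustrative placeholders, not a complete Tegra186 binding:
 *
 *	ethernet@... {
 *		compatible = "nvidia,tegra186-eqos";
 *		clocks = <...>;
 *		clock-names = "slave_bus", "master_bus", "rx", "ptp_ref", "tx";
 *		resets = <...>;
 *		reset-names = "eqos";
 *		phy-reset-gpios = <&gpio ...>;
 *	};
 */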
/* Board-specific Ethernet interface initialization; boards may override it */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

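/*
 * Acquire the STM32-specific resources: validate the PHY interface mode,
 * run any board-level glue setup, then look up the bus/RX/TX clocks, the
 * optional PHY clock, and the optional PHY reset GPIO and PHY address from
 * the "phy-handle" node.
 */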
static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* search "reset-gpios" in phy node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("PHY reset GPIO not provided: %d", ret);

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

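/*
 * For reference, a minimal sketch of the device tree fragment this function
 * parses; the unit address, property values, and specifiers ("...") are
 * illustrative placeholders, not a complete STM32 binding:
 *
 *	ethernet@... {
 *		compatible = "snps,dwmac-4.20a";
 *		clocks = <...>;
 *		clock-names = "stmmaceth", "mac-clk-rx", "mac-clk-tx", "eth-ck";
 *		phy-mode = "rgmii";
 *		max-speed = <1000>;
 *		phy-handle = <&phy0>;
 *	};
 *
 *	phy0: ethernet-phy@0 {
 *		reg = <0>;
 *		reset-gpios = <&gpio ...>;
 *	};
 */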
static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

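/*
 * The i.MX configuration does not take clocks or resets from the device
 * tree here; the only resource to check is that a valid "phy-mode" is
 * supplied.
 */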
static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_imx(struct udevice *dev)
{
	return 0;
}

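/*
 * Bind-time setup common to all configurations: map the register regions,
 * allocate the core DMA resources and the configuration-specific resources,
 * and register (or look up) the MDIO bus used to reach the PHY.
 */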
static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = devfdt_get_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("devfdt_get_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

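/* U-Boot network uclass operations, shared by every configuration */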
static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
};

static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_tegra186,
	.eqos_flush_desc = eqos_flush_desc_tegra186,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

static const struct eqos_config eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};

static const struct udevice_id eqos_ids[] = {
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
	{
		.compatible = "snps,dwmac-4.20a",
		.data = (ulong)&eqos_stm32_config
	},
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto_alloc_size = sizeof(struct eqos_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};