// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/net/ravb.c
 * This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/gpio.h>

/* Registers */
#define RAVB_REG_CCC            0x000
#define RAVB_REG_DBAT           0x004
#define RAVB_REG_CSR            0x00C
#define RAVB_REG_APSR           0x08C
#define RAVB_REG_RCR            0x090
#define RAVB_REG_TGC            0x300
#define RAVB_REG_TCCR           0x304
#define RAVB_REG_RIC0           0x360
#define RAVB_REG_RIC1           0x368
#define RAVB_REG_RIC2           0x370
#define RAVB_REG_TIC            0x378
#define RAVB_REG_ECMR           0x500
#define RAVB_REG_RFLR           0x508
#define RAVB_REG_ECSIPR         0x518
#define RAVB_REG_PIR            0x520
#define RAVB_REG_GECMR          0x5b0
#define RAVB_REG_MAHR           0x5c0
#define RAVB_REG_MALR           0x5c8

#define CCC_OPC_CONFIG          BIT(0)
#define CCC_OPC_OPERATION       BIT(1)
#define CCC_BOC                 BIT(20)

#define CSR_OPS                 0x0000000F
#define CSR_OPS_CONFIG          BIT(1)

#define APSR_RDM                BIT(13)
#define APSR_TDM                BIT(14)

#define TCCR_TSRQ0              BIT(0)

#define RFLR_RFL_MIN            0x05EE

#define PIR_MDI                 BIT(3)
#define PIR_MDO                 BIT(2)
#define PIR_MMD                 BIT(1)
#define PIR_MDC                 BIT(0)

#define ECMR_TRCCM              BIT(26)
#define ECMR_RZPF               BIT(20)
#define ECMR_PFR                BIT(18)
#define ECMR_RXF                BIT(17)
#define ECMR_RE                 BIT(6)
#define ECMR_TE                 BIT(5)
#define ECMR_DM                 BIT(1)
#define ECMR_CHG_DM             (ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC              16
#define RAVB_NUM_TX_DESC                8
#define RAVB_NUM_RX_DESC                8

#define RAVB_TX_QUEUE_OFFSET            0
#define RAVB_RX_QUEUE_OFFSET            4

#define RAVB_DESC_DT(n)                 ((n) << 28)
#define RAVB_DESC_DT_FSINGLE            RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX            RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS                RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY             RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY             RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK               RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)                 (((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK               0xfff

#define RAVB_RX_DESC_MSC_MC             BIT(23)
#define RAVB_RX_DESC_MSC_CEEF           BIT(22)
#define RAVB_RX_DESC_MSC_CRL            BIT(21)
#define RAVB_RX_DESC_MSC_FRE            BIT(20)
#define RAVB_RX_DESC_MSC_RTLF           BIT(19)
#define RAVB_RX_DESC_MSC_RTSF           BIT(18)
#define RAVB_RX_DESC_MSC_RFE            BIT(17)
#define RAVB_RX_DESC_MSC_CRC            BIT(16)
#define RAVB_RX_DESC_MSC_MASK           (0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
        (RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
         RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS              1000

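/*
 * Descriptor layout: the upper nibble of ->ctrl carries the descriptor type
 * (RAVB_DESC_DT_*), the low 12 bits carry the data size (RAVB_DESC_DS), and
 * ->dptr holds the buffer address, or the address of the next descriptor for
 * LINKFIX entries. RX descriptors additionally report frame status in the
 * RAVB_RX_DESC_MSC_* bits of ->ctrl.
 */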
struct ravb_desc {
        u32 ctrl;
        u32 dptr;
};

struct ravb_rxdesc {
        struct ravb_desc data;
        struct ravb_desc link;
        u8 __pad[48];
        u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
        struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];
        struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];
        struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];
        u32 rx_desc_idx;
        u32 tx_desc_idx;

        struct phy_device *phydev;
        struct mii_dev *bus;
        void __iomem *iobase;
        struct clk_bulk clks;
        struct gpio_desc reset_gpio;
};

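/*
 * Descriptors and packet buffers are shared with the AVB-DMAC, so they are
 * flushed from the data cache before being handed to the hardware and
 * invalidated before the CPU reads them back. The invalidate helper widens
 * the range to ARCH_DMA_MINALIGN boundaries so the underlying cache
 * operation always works on whole cache lines.
 */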
static inline void ravb_flush_dcache(u32 addr, u32 len)
{
        flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
        u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
        u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
        invalidate_dcache_range(start, end);
}

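/*
 * TX path: each frame is queued in a single FSINGLE descriptor. The
 * descriptor type field is then polled until the DMAC changes it away from
 * FSINGLE (the frame has been fetched) or the RAVB_TX_TIMEOUT_MS timeout
 * expires.
 */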
static int ravb_send(struct udevice *dev, void *packet, int len)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
        unsigned int start;

        /* Update TX descriptor */
        ravb_flush_dcache((uintptr_t)packet, len);
        memset(desc, 0x0, sizeof(*desc));
        desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
        desc->dptr = (uintptr_t)packet;
        ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

        /* Restart the transmitter if disabled */
        if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
                setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

        /* Wait until packet is transmitted */
        start = get_timer(0);
        while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
                ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
                if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
                        break;
                udelay(10);
        }

        if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
                return -ETIMEDOUT;

        eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
        return 0;
}

static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
        int len;
        u8 *packet;

        /* Check if the rx descriptor is ready */
        ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
        if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
                return -EAGAIN;

        /* Check for errors */
        if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
                desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
                return -EAGAIN;
        }

        len = desc->data.ctrl & RAVB_DESC_DS_MASK;
        packet = (u8 *)(uintptr_t)desc->data.dptr;
        ravb_invalidate_dcache((uintptr_t)packet, len);

        *packetp = packet;
        return len;
}

static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

        /* Make current descriptor available again */
        desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
        ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

        /* Point to the next descriptor */
        eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
        desc = &eth->rx_desc[eth->rx_desc_idx];
        ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

        return 0;
}

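/*
 * Switch the AVB-DMAC into CONFIG mode and wait for the CSR operating-mode
 * field to reflect the change. The driver programs descriptor tables and
 * MAC registers in this mode and only switches to OPERATION mode at the end
 * of ravb_start().
 */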
static int ravb_reset(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);

        /* Set config mode */
        writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

        /* Check that the operating mode has changed to CONFIG mode */
        return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
                                 CSR_OPS_CONFIG, true, 100, true);
}

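/*
 * The descriptor base address table (DBAT) holds one link descriptor per
 * queue. Entry RAVB_TX_QUEUE_OFFSET (0) is later pointed at the TX ring and
 * entry RAVB_RX_QUEUE_OFFSET (4) at the RX ring; unused entries stay EOS.
 * Both rings are closed by a LINKFIX descriptor pointing back to the start
 * of the ring.
 */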
static void ravb_base_desc_init(struct ravb_priv *eth)
{
        const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
        int i;

        /* Initialize all descriptors */
        memset(eth->base_desc, 0x0, desc_size);

        for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
                eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

        ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

        /* Register the descriptor base address table */
        writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

static void ravb_tx_desc_init(struct ravb_priv *eth)
{
        const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
        int i;

        /* Initialize all descriptors */
        memset(eth->tx_desc, 0x0, desc_size);
        eth->tx_desc_idx = 0;

        for (i = 0; i < RAVB_NUM_TX_DESC; i++)
                eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

        /* Mark the end of the descriptors */
        eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
        eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
        ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

        /* Point the controller to the TX descriptor list. */
        eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
        eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
        ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
                          sizeof(struct ravb_desc));
}

static void ravb_rx_desc_init(struct ravb_priv *eth)
{
        const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
        int i;

        /* Initialize all descriptors */
        memset(eth->rx_desc, 0x0, desc_size);
        eth->rx_desc_idx = 0;

        for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
                eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
                                            RAVB_DESC_DS(PKTSIZE_ALIGN);
                eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

                eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
                eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
        }

        /* Mark the end of the descriptors */
        eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
        eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
        ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

        /* Point the controller to the rx descriptor list */
        eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
        eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
        ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
                          sizeof(struct ravb_desc));
}

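/*
 * PHY setup: pulse the optional reset GPIO, attach the PHY described in the
 * device tree, and mask everything beyond 100/1000BASE-T full duplex. When
 * "max-speed" limits the link below 1000 Mbit/s, gigabit advertisement is
 * also cleared directly in MII_CTRL1000.
 */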
static int ravb_phy_config(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct phy_device *phydev;
        int reg;

        if (dm_gpio_is_valid(&eth->reset_gpio)) {
                dm_gpio_set_value(&eth->reset_gpio, 1);
                mdelay(20);
                dm_gpio_set_value(&eth->reset_gpio, 0);
                mdelay(1);
        }

        phydev = phy_connect(eth->bus, -1, dev, pdata->phy_interface);
        if (!phydev)
                return -ENODEV;

        eth->phydev = phydev;

        phydev->supported &= SUPPORTED_100baseT_Full |
                             SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                             SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
                             SUPPORTED_Asym_Pause;

        if (pdata->max_speed != 1000) {
                phydev->supported &= ~SUPPORTED_1000baseT_Full;
                reg = phy_read(phydev, -1, MII_CTRL1000);
                reg &= ~(BIT(9) | BIT(8));
                phy_write(phydev, -1, MII_CTRL1000, reg);
        }

        phy_config(phydev);

        return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct eth_pdata *pdata = dev_get_plat(dev);
        unsigned char *mac = pdata->enetaddr;

        writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
               eth->iobase + RAVB_REG_MAHR);

        writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

        return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
        /* Disable MAC interrupts */
        writel(0, eth->iobase + RAVB_REG_ECSIPR);

        /* Set the receive frame length limit */
        writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

        return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct eth_pdata *pdata = dev_get_plat(dev);
        int ret = 0;
        int mode = 0;
        unsigned int delay;
        bool explicit_delay = false;

        /* Set CONFIG mode */
        ret = ravb_reset(dev);
        if (ret)
                return ret;

        /* Disable all interrupts */
        writel(0, eth->iobase + RAVB_REG_RIC0);
        writel(0, eth->iobase + RAVB_REG_RIC1);
        writel(0, eth->iobase + RAVB_REG_RIC2);
        writel(0, eth->iobase + RAVB_REG_TIC);

        /* Set little endian */
        clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

        /* AVB rx set */
        writel(0x18000001, eth->iobase + RAVB_REG_RCR);

        /* FIFO size set */
        writel(0x00222210, eth->iobase + RAVB_REG_TGC);

        /* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
        if ((rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77990) ||
            (rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77995))
                return 0;

        if (!dev_read_u32(dev, "rx-internal-delay-ps", &delay)) {
                /* Valid values are 0 and 1800, according to DT bindings */
                if (delay) {
                        mode |= APSR_RDM;
                        explicit_delay = true;
                }
        }

        if (!dev_read_u32(dev, "tx-internal-delay-ps", &delay)) {
                /* Valid values are 0 and 2000, according to DT bindings */
                if (delay) {
                        mode |= APSR_TDM;
                        explicit_delay = true;
                }
        }

        if (!explicit_delay) {
                if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
                    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
                        mode |= APSR_RDM;

                if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
                    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
                        mode |= APSR_TDM;
        }

        writel(mode, eth->iobase + RAVB_REG_APSR);

        return 0;
}

static int ravb_config(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        struct phy_device *phy = eth->phydev;
        u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
        int ret;

        /* Configure AVB-DMAC register */
        ravb_dmac_init(dev);

        /* Configure E-MAC registers */
        ravb_mac_init(eth);
        ravb_write_hwaddr(dev);

        ret = phy_startup(phy);
        if (ret)
                return ret;

        /* Set the transfer speed */
        if (phy->speed == 100)
                writel(0, eth->iobase + RAVB_REG_GECMR);
        else if (phy->speed == 1000)
                writel(1, eth->iobase + RAVB_REG_GECMR);

        /* Check if full duplex mode is supported by the phy */
        if (phy->duplex)
                mask |= ECMR_DM;

        writel(mask, eth->iobase + RAVB_REG_ECMR);

        return 0;
}

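/*
 * Interface bring-up: reset into CONFIG mode, rebuild the base/TX/RX
 * descriptor rings, program the DMAC and E-MAC and start the PHY via
 * ravb_config(), then write CCC_OPC_OPERATION so the DMAC begins processing
 * descriptors.
 */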
static int ravb_start(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);
        int ret;

        ret = ravb_reset(dev);
        if (ret)
                return ret;

        ravb_base_desc_init(eth);
        ravb_tx_desc_init(eth);
        ravb_rx_desc_init(eth);

        ret = ravb_config(dev);
        if (ret)
                return ret;

        /* Setting the control will start the AVB-DMAC process. */
        writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

        return 0;
}

static void ravb_stop(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);

        phy_shutdown(eth->phydev);
        ravb_reset(dev);
}

static int ravb_probe(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_plat(dev);
        struct ravb_priv *eth = dev_get_priv(dev);
        struct ofnode_phandle_args phandle_args;
        struct mii_dev *mdiodev;
        void __iomem *iobase;
        int ret;

        iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
        eth->iobase = iobase;

        ret = clk_get_bulk(dev, &eth->clks);
        if (ret < 0)
                goto err_mdio_alloc;

        ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &phandle_args);
        if (!ret) {
                gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
                                           &eth->reset_gpio, GPIOD_IS_OUT);
        }

        if (!dm_gpio_is_valid(&eth->reset_gpio)) {
                gpio_request_by_name(dev, "reset-gpios", 0, &eth->reset_gpio,
                                     GPIOD_IS_OUT);
        }

        mdiodev = mdio_alloc();
        if (!mdiodev) {
                ret = -ENOMEM;
                goto err_mdio_alloc;
        }

        mdiodev->read = bb_miiphy_read;
        mdiodev->write = bb_miiphy_write;
        bb_miiphy_buses[0].priv = eth;
        snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);

        ret = mdio_register(mdiodev);
        if (ret < 0)
                goto err_mdio_register;

        eth->bus = miiphy_get_dev_by_name(dev->name);

        /* Bring up PHY */
        ret = clk_enable_bulk(&eth->clks);
        if (ret)
                goto err_mdio_register;

        ret = ravb_reset(dev);
        if (ret)
                goto err_mdio_reset;

        ret = ravb_phy_config(dev);
        if (ret)
                goto err_mdio_reset;

        return 0;

err_mdio_reset:
        clk_release_bulk(&eth->clks);
err_mdio_register:
        mdio_free(mdiodev);
err_mdio_alloc:
        unmap_physmem(eth->iobase, MAP_NOCACHE);
        return ret;
}

static int ravb_remove(struct udevice *dev)
{
        struct ravb_priv *eth = dev_get_priv(dev);

        clk_release_bulk(&eth->clks);

        free(eth->phydev);
        mdio_unregister(eth->bus);
        mdio_free(eth->bus);
        if (dm_gpio_is_valid(&eth->reset_gpio))
                dm_gpio_free(dev, &eth->reset_gpio);
        unmap_physmem(eth->iobase, MAP_NOCACHE);

        return 0;
}

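/*
 * MDIO is bit-banged through the PIR register: PIR_MMD selects output mode,
 * PIR_MDO/PIR_MDI carry data out/in and PIR_MDC drives the clock. The
 * callbacks below plug into the generic bb_miiphy framework used by the
 * MDIO bus registered in ravb_probe().
 */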
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
        return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
        struct ravb_priv *eth = bus->priv;

        setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

        return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
        struct ravb_priv *eth = bus->priv;

        clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

        return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
        struct ravb_priv *eth = bus->priv;

        if (v)
                setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
        else
                clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

        return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
        struct ravb_priv *eth = bus->priv;

        *v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

        return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
        struct ravb_priv *eth = bus->priv;

        if (v)
                setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
        else
                clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

        return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
        udelay(10);

        return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
        {
                .name           = "ravb",
                .init           = ravb_bb_init,
                .mdio_active    = ravb_bb_mdio_active,
                .mdio_tristate  = ravb_bb_mdio_tristate,
                .set_mdio       = ravb_bb_set_mdio,
                .get_mdio       = ravb_bb_get_mdio,
                .set_mdc        = ravb_bb_set_mdc,
                .delay          = ravb_bb_delay,
        },
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
        .start          = ravb_start,
        .send           = ravb_send,
        .recv           = ravb_recv,
        .free_pkt       = ravb_free_pkt,
        .stop           = ravb_stop,
        .write_hwaddr   = ravb_write_hwaddr,
};

int ravb_of_to_plat(struct udevice *dev)
{
        struct eth_pdata *pdata = dev_get_plat(dev);
        const fdt32_t *cell;

        pdata->iobase = dev_read_addr(dev);

        pdata->phy_interface = dev_read_phy_mode(dev);
        if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
                return -EINVAL;

        pdata->max_speed = 1000;
        cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
        if (cell)
                pdata->max_speed = fdt32_to_cpu(*cell);

        sprintf(bb_miiphy_buses[0].name, dev->name);

        return 0;
}

static const struct udevice_id ravb_ids[] = {
        { .compatible = "renesas,etheravb-rcar-gen3" },
        { .compatible = "renesas,etheravb-rcar-gen4" },
        { }
};

U_BOOT_DRIVER(eth_ravb) = {
        .name           = "ravb",
        .id             = UCLASS_ETH,
        .of_match       = ravb_ids,
        .of_to_plat     = ravb_of_to_plat,
        .probe          = ravb_probe,
        .remove         = ravb_remove,
        .ops            = &ravb_ops,
        .priv_auto      = sizeof(struct ravb_priv),
        .plat_auto      = sizeof(struct eth_pdata),
        .flags          = DM_FLAG_ALLOC_PRIV_DMA,
};