blob: c28680565fc934a4244e30c3d0aa837a4dbccdd8 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Marek Vasut8ae51b62017-05-13 15:54:28 +02002/*
3 * drivers/net/ravb.c
4 * This file is driver for Renesas Ethernet AVB.
5 *
6 * Copyright (C) 2015-2017 Renesas Electronics Corporation
7 *
8 * Based on the SuperH Ethernet driver.
Marek Vasut8ae51b62017-05-13 15:54:28 +02009 */
10
11#include <common.h>
Marek Vasut1fea9e22017-07-21 23:20:35 +020012#include <clk.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070013#include <cpu_func.h>
Marek Vasut8ae51b62017-05-13 15:54:28 +020014#include <dm.h>
15#include <errno.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060016#include <log.h>
Marek Vasut8ae51b62017-05-13 15:54:28 +020017#include <miiphy.h>
18#include <malloc.h>
Simon Glass90526e92020-05-10 11:39:56 -060019#include <asm/cache.h>
Simon Glasscd93d622020-05-10 11:40:13 -060020#include <linux/bitops.h>
Simon Glassc05ed002020-05-10 11:40:11 -060021#include <linux/delay.h>
Marek Vasut8ae51b62017-05-13 15:54:28 +020022#include <linux/mii.h>
23#include <wait_bit.h>
24#include <asm/io.h>
Simon Glass401d1c42020-10-30 21:38:53 -060025#include <asm/global_data.h>
Marek Vasutbddb44e2017-09-15 21:11:15 +020026#include <asm/gpio.h>
Marek Vasut8ae51b62017-05-13 15:54:28 +020027
/* AVB-DMAC and E-MAC register offsets from the controller base address */
#define RAVB_REG_CCC		0x000	/* AVB-DMAC operating mode */
#define RAVB_REG_DBAT		0x004	/* Descriptor base address table */
#define RAVB_REG_CSR		0x00C	/* AVB-DMAC status */
#define RAVB_REG_APSR		0x08C	/* TX/RX internal clock delay setup */
#define RAVB_REG_RCR		0x090	/* Receive configuration */
#define RAVB_REG_TGC		0x300	/* TX general (FIFO) configuration */
#define RAVB_REG_TCCR		0x304	/* TX control (transmission requests) */
#define RAVB_REG_RIC0		0x360	/* Receive interrupt control 0 */
#define RAVB_REG_RIC1		0x368	/* Receive interrupt control 1 */
#define RAVB_REG_RIC2		0x370	/* Receive interrupt control 2 */
#define RAVB_REG_TIC		0x378	/* Transmit interrupt control */
#define RAVB_REG_ECMR		0x500	/* E-MAC mode */
#define RAVB_REG_RFLR		0x508	/* Receive frame length limit */
#define RAVB_REG_ECSIPR		0x518	/* E-MAC interrupt permission */
#define RAVB_REG_PIR		0x520	/* PHY interface (bit-banged MDIO) */
#define RAVB_REG_GECMR		0x5b0	/* E-MAC gigabit mode */
#define RAVB_REG_MAHR		0x5c0	/* MAC address high */
#define RAVB_REG_MALR		0x5c8	/* MAC address low */

#define CCC_OPC_CONFIG		BIT(0)	/* Request CONFIG operating mode */
#define CCC_OPC_OPERATION	BIT(1)	/* Request OPERATION operating mode */
#define CCC_BOC			BIT(20)	/* Byte order; cleared for little endian */

#define CSR_OPS			0x0000000F	/* Current operating mode field */
#define CSR_OPS_CONFIG		BIT(1)		/* Controller is in CONFIG mode */

#define APSR_RDM		BIT(13)	/* RX clock internal delay mode */
#define APSR_TDM		BIT(14)	/* TX clock internal delay mode */

#define TCCR_TSRQ0		BIT(0)	/* Transmission request, queue 0 */

#define RFLR_RFL_MIN		0x05EE	/* RX frame length limit: 1518 bytes */

/* PIR bits driven/sampled by the bit-banged MDIO helpers below */
#define PIR_MDI			BIT(3)	/* MDIO data in */
#define PIR_MDO			BIT(2)	/* MDIO data out */
#define PIR_MMD			BIT(1)	/* MDIO direction; set = output */
#define PIR_MDC			BIT(0)	/* MDIO clock */

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)	/* Receive enable */
#define ECMR_TE			BIT(5)	/* Transmit enable */
#define ECMR_DM			BIT(1)	/* Duplex mode; set = full duplex */
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC		16	/* Base address table entries */
#define RAVB_NUM_TX_DESC		8	/* TX ring size (last slot is a link) */
#define RAVB_NUM_RX_DESC		8	/* RX ring size */

/* Queue indices within the descriptor base address table */
#define RAVB_TX_QUEUE_OFFSET		0
#define RAVB_RX_QUEUE_OFFSET		4

/* Descriptor type field, ctrl bits 31:28 */
#define RAVB_DESC_DT(n)			((n) << 28)
#define RAVB_DESC_DT_FSINGLE		RAVB_DESC_DT(0x7)	/* Whole frame in one descriptor */
#define RAVB_DESC_DT_LINKFIX		RAVB_DESC_DT(0x9)	/* Link to another descriptor */
#define RAVB_DESC_DT_EOS		RAVB_DESC_DT(0xa)	/* End of set */
#define RAVB_DESC_DT_FEMPTY		RAVB_DESC_DT(0xc)	/* Frame empty (owned by HW) */
#define RAVB_DESC_DT_EEMPTY		RAVB_DESC_DT(0x3)	/* Empty descriptor */
#define RAVB_DESC_DT_MASK		RAVB_DESC_DT(0xf)

/* Descriptor data size field in bytes, ctrl bits 11:0 */
#define RAVB_DESC_DS(n)			(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK		0xfff

/* RX descriptor MAC status/error bits, ctrl bits 23:16 */
#define RAVB_RX_DESC_MSC_MC		BIT(23)
#define RAVB_RX_DESC_MSC_CEEF		BIT(22)
#define RAVB_RX_DESC_MSC_CRL		BIT(21)
#define RAVB_RX_DESC_MSC_FRE		BIT(20)
#define RAVB_RX_DESC_MSC_RTLF		BIT(19)
#define RAVB_RX_DESC_MSC_RTSF		BIT(18)
#define RAVB_RX_DESC_MSC_RFE		BIT(17)
#define RAVB_RX_DESC_MSC_CRC		BIT(16)
#define RAVB_RX_DESC_MSC_MASK		(0xff << 16)

/* Any of these MSC bits set means the received frame is bad */
#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS		1000	/* Max wait for one TX completion */

/* One AVB-DMAC descriptor: type/size control word + data pointer */
struct ravb_desc {
	u32 ctrl;
	u32 dptr;
};

/*
 * One RX ring slot: a data descriptor, a link descriptor chaining to the
 * next slot, padding so the packet buffer starts 64 bytes into the slot,
 * and the packet buffer itself.
 */
struct ravb_rxdesc {
	struct ravb_desc data;
	struct ravb_desc link;
	u8 __pad[48];
	u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];	/* Base address table */
	struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];	/* TX descriptor ring */
	struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];	/* RX descriptor ring */
	u32 rx_desc_idx;	/* Next RX slot to check */
	u32 tx_desc_idx;	/* Next TX slot to use */

	struct phy_device *phydev;
	struct mii_dev *bus;
	void __iomem *iobase;
	struct clk_bulk clks;
	struct gpio_desc reset_gpio;
};
136
137static inline void ravb_flush_dcache(u32 addr, u32 len)
138{
139 flush_dcache_range(addr, addr + len);
140}
141
142static inline void ravb_invalidate_dcache(u32 addr, u32 len)
143{
144 u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
145 u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
146 invalidate_dcache_range(start, end);
147}
148
/*
 * eth_ops.send: hand @packet to the AVB-DMAC through the current TX
 * descriptor and busy-wait until the hardware releases it.
 *
 * Returns 0 on success, -ETIMEDOUT if the frame was not sent within
 * RAVB_TX_TIMEOUT_MS.
 */
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/*
	 * Update TX descriptor: flush the packet data first, then publish
	 * the descriptor (FSINGLE = whole frame in a single descriptor).
	 */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/*
	 * Wait until the DMAC rewrites the descriptor type away from
	 * FSINGLE, which signals that transmission completed.
	 */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	};

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	/*
	 * Advance over the usable slots only; the last ring entry is a
	 * LINKFIX descriptor, hence modulo RAVB_NUM_TX_DESC - 1.
	 */
	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}
181
/*
 * eth_ops.recv: receive one packet, if available.
 *
 * Returns the frame length with *packetp pointing into the current RX
 * slot's buffer, or -EAGAIN if the descriptor is still empty or the
 * frame carries an error status.  The descriptor is recycled later by
 * ravb_free_pkt().
 */
static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the rx descriptor is ready (still FEMPTY = owned by HW) */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors flagged by the MAC in the MSC field */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	/* The DMAC wrote the frame; invalidate before the CPU reads it */
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}
207
/*
 * eth_ops.free_pkt: return the current RX descriptor to the hardware
 * (mark it FEMPTY again) and advance to the next ring slot,
 * pre-invalidating that slot's cache lines.
 */
static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}
224
225static int ravb_reset(struct udevice *dev)
226{
227 struct ravb_priv *eth = dev_get_priv(dev);
228
229 /* Set config mode */
230 writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);
231
232 /* Check the operating mode is changed to the config mode. */
Álvaro Fernández Rojas48263502018-01-23 17:14:55 +0100233 return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
234 CSR_OPS_CONFIG, true, 100, true);
Marek Vasut8ae51b62017-05-13 15:54:28 +0200235}
236
/*
 * Initialize the descriptor base address table: every entry is marked
 * EOS (unused) and the table is registered with the DMAC via DBAT.  The
 * TX and RX queue entries are filled in later by the ring init helpers.
 */
static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	/* Flush so the DMAC sees the table contents */
	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}
253
/*
 * Build the TX descriptor ring: all slots EMPTY except the last, which
 * is a LINKFIX descriptor pointing back to the ring head.  Then hook the
 * ring into the TX queue slot of the descriptor base address table.
 */
static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors: link back to the ring head */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}
277
/*
 * Build the RX descriptor ring.  Each slot has an EMPTY data descriptor
 * pointing at the slot's packet buffer, plus a LINKFIX descriptor
 * chaining to the next slot; the last slot links back to the ring head.
 * Finally hook the ring into the RX queue entry of the base table.
 */
static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		/* Chain to the next slot; the last link is fixed up below */
		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors: last link points at the head */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}
307
/*
 * Find, reset, and configure the PHY on the MDIO bus.
 *
 * Optionally pulses the PHY reset GPIO (20ms asserted, 1ms recovery),
 * scans all bus addresses for a PHY, restricts the advertised features
 * to what this MAC supports, and strips gigabit advertisement when the
 * board caps max-speed below 1000.
 *
 * Returns 0 on success, -ENODEV if no PHY is found.
 */
static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct phy_device *phydev;
	int mask = 0xffffffff, reg;	/* scan every PHY address */

	/* Hardware-reset the PHY if a reset GPIO was claimed in probe */
	if (dm_gpio_is_valid(&eth->reset_gpio)) {
		dm_gpio_set_value(&eth->reset_gpio, 1);
		mdelay(20);
		dm_gpio_set_value(&eth->reset_gpio, 0);
		mdelay(1);
	}

	phydev = phy_find_by_mask(eth->bus, mask);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev, pdata->phy_interface);

	eth->phydev = phydev;

	/* Restrict advertisement to link modes this MAC supports */
	phydev->supported &= SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			     SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;

	if (pdata->max_speed != 1000) {
		/*
		 * Capped below gigabit: also clear the 1000BASE-T
		 * advertisement bits (9:8) in the PHY's MII_CTRL1000.
		 */
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}
346
347/* Set Mac address */
348static int ravb_write_hwaddr(struct udevice *dev)
349{
350 struct ravb_priv *eth = dev_get_priv(dev);
Simon Glassc69cda22020-12-03 16:55:20 -0700351 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut8ae51b62017-05-13 15:54:28 +0200352 unsigned char *mac = pdata->enetaddr;
353
354 writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
355 eth->iobase + RAVB_REG_MAHR);
356
357 writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);
358
359 return 0;
360}
361
362/* E-MAC init function */
363static int ravb_mac_init(struct ravb_priv *eth)
364{
365 /* Disable MAC Interrupt */
366 writel(0, eth->iobase + RAVB_REG_ECSIPR);
367
368 /* Recv frame limit set register */
369 writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);
370
371 return 0;
372}
373
/*
 * AVB-DMAC init: enter CONFIG mode, mask interrupts, select little
 * endian, set RX/FIFO configuration, and program the internal TX/RX
 * clock delays from DT (or infer them from the RGMII *ID phy mode when
 * DT gives no explicit delay properties).
 */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	int ret = 0;
	int mode = 0;			/* APSR delay bits to program */
	unsigned int delay;
	bool explicit_delay = false;	/* DT gave *-internal-delay-ps */

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/*
	 * AVB rx set.  NOTE(review): magic value — presumably effective
	 * frame/timestamp RX config; confirm against the hardware manual.
	 */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
	if ((rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77990) ||
	    (rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77995))
		return 0;

	if (!dev_read_u32(dev, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		if (delay) {
			mode |= APSR_RDM;
			explicit_delay = true;
		}
	}

	if (!dev_read_u32(dev, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		if (delay) {
			mode |= APSR_TDM;
			explicit_delay = true;
		}
	}

	/* No explicit delays in DT: derive them from the RGMII phy mode */
	if (!explicit_delay) {
		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
			mode |= APSR_RDM;

		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			mode |= APSR_TDM;
	}

	writel(mode, eth->iobase + RAVB_REG_APSR);

	return 0;
}
439
/*
 * Full controller configuration: DMAC, E-MAC, MAC address, then PHY
 * startup; finally program link speed and duplex into the E-MAC.
 *
 * Returns 0 on success or the phy_startup() error code.
 */
static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC register */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	/* Bring up the PHY link and read back the negotiated link mode */
	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed: GECMR = 0 for 100Mbit, 1 for gigabit */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

	return 0;
}
472
Marek Vasute3105ea2018-01-19 23:58:32 +0100473static int ravb_start(struct udevice *dev)
Marek Vasut8ae51b62017-05-13 15:54:28 +0200474{
475 struct ravb_priv *eth = dev_get_priv(dev);
476 int ret;
477
Marek Vasut1fea9e22017-07-21 23:20:35 +0200478 ret = ravb_reset(dev);
479 if (ret)
Marek Vasutc4a8d9c2018-06-18 09:35:45 +0200480 return ret;
Marek Vasut1fea9e22017-07-21 23:20:35 +0200481
Marek Vasut8ae51b62017-05-13 15:54:28 +0200482 ravb_base_desc_init(eth);
483 ravb_tx_desc_init(eth);
484 ravb_rx_desc_init(eth);
485
486 ret = ravb_config(dev);
487 if (ret)
Marek Vasutc4a8d9c2018-06-18 09:35:45 +0200488 return ret;
Marek Vasut8ae51b62017-05-13 15:54:28 +0200489
490 /* Setting the control will start the AVB-DMAC process. */
491 writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);
492
493 return 0;
494}
495
/* eth_ops.stop: shut down the PHY and drop the DMAC back to CONFIG mode. */
static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
	ravb_reset(dev);
}
503
504static int ravb_probe(struct udevice *dev)
505{
Simon Glassc69cda22020-12-03 16:55:20 -0700506 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut8ae51b62017-05-13 15:54:28 +0200507 struct ravb_priv *eth = dev_get_priv(dev);
Marek Vasut701db6e2018-06-18 04:02:15 +0200508 struct ofnode_phandle_args phandle_args;
Marek Vasut8ae51b62017-05-13 15:54:28 +0200509 struct mii_dev *mdiodev;
510 void __iomem *iobase;
511 int ret;
512
513 iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
514 eth->iobase = iobase;
515
Adam Ford182754f2021-12-06 10:29:26 -0600516 ret = clk_get_bulk(dev, &eth->clks);
Marek Vasut1fea9e22017-07-21 23:20:35 +0200517 if (ret < 0)
518 goto err_mdio_alloc;
519
Marek Vasut701db6e2018-06-18 04:02:15 +0200520 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &phandle_args);
521 if (!ret) {
522 gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
523 &eth->reset_gpio, GPIOD_IS_OUT);
524 }
525
526 if (!dm_gpio_is_valid(&eth->reset_gpio)) {
527 gpio_request_by_name(dev, "reset-gpios", 0, &eth->reset_gpio,
528 GPIOD_IS_OUT);
529 }
Marek Vasutbddb44e2017-09-15 21:11:15 +0200530
Marek Vasut8ae51b62017-05-13 15:54:28 +0200531 mdiodev = mdio_alloc();
532 if (!mdiodev) {
533 ret = -ENOMEM;
534 goto err_mdio_alloc;
535 }
536
537 mdiodev->read = bb_miiphy_read;
538 mdiodev->write = bb_miiphy_write;
539 bb_miiphy_buses[0].priv = eth;
540 snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);
541
542 ret = mdio_register(mdiodev);
543 if (ret < 0)
544 goto err_mdio_register;
545
546 eth->bus = miiphy_get_dev_by_name(dev->name);
547
Marek Vasutd64c7892018-02-13 17:21:15 +0100548 /* Bring up PHY */
Adam Ford182754f2021-12-06 10:29:26 -0600549 ret = clk_enable_bulk(&eth->clks);
Marek Vasutd64c7892018-02-13 17:21:15 +0100550 if (ret)
551 goto err_mdio_register;
552
553 ret = ravb_reset(dev);
554 if (ret)
555 goto err_mdio_reset;
556
557 ret = ravb_phy_config(dev);
558 if (ret)
559 goto err_mdio_reset;
560
Marek Vasut8ae51b62017-05-13 15:54:28 +0200561 return 0;
562
Marek Vasutd64c7892018-02-13 17:21:15 +0100563err_mdio_reset:
Adam Ford182754f2021-12-06 10:29:26 -0600564 clk_release_bulk(&eth->clks);
Marek Vasut8ae51b62017-05-13 15:54:28 +0200565err_mdio_register:
566 mdio_free(mdiodev);
567err_mdio_alloc:
568 unmap_physmem(eth->iobase, MAP_NOCACHE);
569 return ret;
570}
571
/*
 * Remove: release clocks, free the PHY and MDIO bus, release the reset
 * GPIO if one was claimed, and unmap the register window.
 */
static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_release_bulk(&eth->clks);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	if (dm_gpio_is_valid(&eth->reset_gpio))
		dm_gpio_free(dev, &eth->reset_gpio);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}
587
/* Bit-bang MDIO init callback: no extra hardware setup is needed. */
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
592
593int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
594{
595 struct ravb_priv *eth = bus->priv;
596
597 setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);
598
599 return 0;
600}
601
602int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
603{
604 struct ravb_priv *eth = bus->priv;
605
606 clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);
607
608 return 0;
609}
610
611int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
612{
613 struct ravb_priv *eth = bus->priv;
614
615 if (v)
616 setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
617 else
618 clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
619
620 return 0;
621}
622
623int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
624{
625 struct ravb_priv *eth = bus->priv;
626
627 *v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;
628
629 return 0;
630}
631
632int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
633{
634 struct ravb_priv *eth = bus->priv;
635
636 if (v)
637 setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
638 else
639 clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
640
641 return 0;
642}
643
/* Half-period delay between MDC edges: 10us. */
int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
650
/*
 * The single bit-banged MDIO bus instance.  .priv is pointed at the
 * driver's ravb_priv in ravb_probe(), and .name is overwritten with the
 * device name in ravb_of_to_plat().
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "ravb",
		.init		= ravb_bb_init,
		.mdio_active	= ravb_bb_mdio_active,
		.mdio_tristate	= ravb_bb_mdio_tristate,
		.set_mdio	= ravb_bb_set_mdio,
		.get_mdio	= ravb_bb_get_mdio,
		.set_mdc	= ravb_bb_set_mdc,
		.delay		= ravb_bb_delay,
	},
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);
664
/* Driver-model ethernet operations for the RAVB controller. */
static const struct eth_ops ravb_ops = {
	.start			= ravb_start,
	.send			= ravb_send,
	.recv			= ravb_recv,
	.free_pkt		= ravb_free_pkt,
	.stop			= ravb_stop,
	.write_hwaddr		= ravb_write_hwaddr,
};
673
Simon Glassd1998a92020-12-03 16:55:21 -0700674int ravb_of_to_plat(struct udevice *dev)
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200675{
Simon Glassc69cda22020-12-03 16:55:20 -0700676 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200677 const fdt32_t *cell;
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200678
Masahiro Yamada25484932020-07-17 14:36:48 +0900679 pdata->iobase = dev_read_addr(dev);
Marek Behún123ca112022-04-07 00:33:01 +0200680
681 pdata->phy_interface = dev_read_phy_mode(dev);
Marek Behúnffb0f6f2022-04-07 00:33:03 +0200682 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200683 return -EINVAL;
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200684
685 pdata->max_speed = 1000;
686 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
687 if (cell)
688 pdata->max_speed = fdt32_to_cpu(*cell);
689
690 sprintf(bb_miiphy_buses[0].name, dev->name);
691
Marek Behún123ca112022-04-07 00:33:01 +0200692 return 0;
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200693}
694
/*
 * Matched SoC compatibles; the generic "rcar-gen3" entry covers other
 * R-Car Gen3 parts.  R8A77990/R8A77995 (E3/D3) skip the APSR clock
 * delay setup in ravb_dmac_init().
 */
static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-r8a77965" },
	{ .compatible = "renesas,etheravb-r8a77970" },
	{ .compatible = "renesas,etheravb-r8a77990" },
	{ .compatible = "renesas,etheravb-r8a77995" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};
705
Marek Vasut8ae51b62017-05-13 15:54:28 +0200706U_BOOT_DRIVER(eth_ravb) = {
707 .name = "ravb",
708 .id = UCLASS_ETH,
Marek Vasut5ee8b4d2017-07-21 23:20:33 +0200709 .of_match = ravb_ids,
Simon Glassd1998a92020-12-03 16:55:21 -0700710 .of_to_plat = ravb_of_to_plat,
Marek Vasut8ae51b62017-05-13 15:54:28 +0200711 .probe = ravb_probe,
712 .remove = ravb_remove,
713 .ops = &ravb_ops,
Simon Glass41575d82020-12-03 16:55:17 -0700714 .priv_auto = sizeof(struct ravb_priv),
Simon Glasscaa4daa2020-12-03 16:55:18 -0700715 .plat_auto = sizeof(struct eth_pdata),
Marek Vasut8ae51b62017-05-13 15:54:28 +0200716 .flags = DM_FLAG_ALLOC_PRIV_DMA,
717};