// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/net/ravb.c
 * This file is a driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <asm/gpio.h>

/* Registers */
#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define APSR_TDM		BIT(14)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC		16
#define RAVB_NUM_TX_DESC		8
#define RAVB_NUM_RX_DESC		8

#define RAVB_TX_QUEUE_OFFSET		0
#define RAVB_RX_QUEUE_OFFSET		4

#define RAVB_DESC_DT(n)			((n) << 28)
#define RAVB_DESC_DT_FSINGLE		RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX		RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS		RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY		RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY		RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK		RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)			(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK		0xfff

#define RAVB_RX_DESC_MSC_MC		BIT(23)
#define RAVB_RX_DESC_MSC_CEEF		BIT(22)
#define RAVB_RX_DESC_MSC_CRL		BIT(21)
#define RAVB_RX_DESC_MSC_FRE		BIT(20)
#define RAVB_RX_DESC_MSC_RTLF		BIT(19)
#define RAVB_RX_DESC_MSC_RTSF		BIT(18)
#define RAVB_RX_DESC_MSC_RFE		BIT(17)
#define RAVB_RX_DESC_MSC_CRC		BIT(16)
#define RAVB_RX_DESC_MSC_MASK		(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS		1000

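/*
 * Descriptor layout: each hardware descriptor is two 32-bit words. The
 * "ctrl" word holds the descriptor type (DT, bits 31:28) and the data size
 * (DS, bits 11:0); "dptr" holds the DMA address of the data buffer, or of
 * the next descriptor for LINKFIX entries. Each RX slot pairs a data
 * descriptor with a link descriptor and embeds its own packet buffer.
 */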
struct ravb_desc {
	u32 ctrl;
	u32 dptr;
};

struct ravb_rxdesc {
	struct ravb_desc data;
	struct ravb_desc link;
	u8 __pad[48];
	u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc	base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc	tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc	rx_desc[RAVB_NUM_RX_DESC];
	u32			rx_desc_idx;
	u32			tx_desc_idx;

	struct phy_device	*phydev;
	struct mii_dev		*bus;
	void __iomem		*iobase;
	struct clk		clk;
	struct gpio_desc	reset_gpio;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}

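/*
 * Transmit path: every packet is described by a single FSINGLE descriptor.
 * Once the descriptor has been flushed to memory, the queue 0 transmit
 * request (TCCR.TSRQ0) is raised and the descriptor is polled until the
 * DMAC rewrites its type field, which signals completion. The TX index
 * wraps one slot early because the last descriptor is the LINKFIX entry
 * chaining the ring back to its head.
 */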
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}

static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the RX descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make the current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check that the operating mode has changed to config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
}

static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the RX descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct phy_device *phydev;
	int mask = 0xffffffff, reg;

	if (dm_gpio_is_valid(&eth->reset_gpio)) {
		dm_gpio_set_value(&eth->reset_gpio, 1);
		mdelay(20);
		dm_gpio_set_value(&eth->reset_gpio, 0);
		mdelay(1);
	}

	phydev = phy_find_by_mask(eth->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	eth->phydev = phydev;

	phydev->supported &= SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			     SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;

	if (pdata->max_speed != 1000) {
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC interrupts */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Set the receive frame length limit */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* Set AVB RX configuration */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* Set FIFO sizes */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
	if ((rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77990) ||
	    (rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77995))
		return 0;

	if ((pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
	    (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID))
		writel(APSR_TDM, eth->iobase + RAVB_REG_APSR);

	return 0;
}

static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC registers */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the PHY */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

	return 0;
}

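/*
 * Interface bring-up: the AVB-DMAC is put back into CONFIG mode, the
 * descriptor rings are rebuilt, the DMAC, E-MAC and PHY are configured,
 * and writing CCC_OPC_OPERATION finally switches the AVB-DMAC into
 * OPERATION mode so frames can be transferred.
 */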
static int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = ravb_reset(dev);
	if (ret)
		return ret;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		return ret;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;
}

static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
	ravb_reset(dev);
}

static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_by_index(dev, 0, &eth->clk);
	if (ret < 0)
		goto err_mdio_alloc;

	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &phandle_args);
	if (!ret) {
		gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
					   &eth->reset_gpio, GPIOD_IS_OUT);
	}

	if (!dm_gpio_is_valid(&eth->reset_gpio)) {
		gpio_request_by_name(dev, "reset-gpios", 0, &eth->reset_gpio,
				     GPIOD_IS_OUT);
	}

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	/* Bring up PHY */
	ret = clk_enable(&eth->clk);
	if (ret)
		goto err_mdio_register;

	ret = ravb_reset(dev);
	if (ret)
		goto err_mdio_reset;

	ret = ravb_phy_config(dev);
	if (ret)
		goto err_mdio_reset;

	return 0;

err_mdio_reset:
	clk_disable(&eth->clk);
err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_disable(&eth->clk);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	if (dm_gpio_is_valid(&eth->reset_gpio))
		dm_gpio_free(dev, &eth->reset_gpio);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}

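/*
 * MDIO is bit-banged through the PIR register: PIR_MMD selects whether the
 * data pin is driven (mdio_active) or released (mdio_tristate), PIR_MDO
 * sets the output level, PIR_MDI reflects the input level and PIR_MDC
 * toggles the clock. The callbacks below plug into the generic bb_miiphy
 * framework; bus->priv is pointed at the driver's private data in
 * ravb_probe().
 */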
int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "ravb",
		.init		= ravb_bb_init,
		.mdio_active	= ravb_bb_mdio_active,
		.mdio_tristate	= ravb_bb_mdio_tristate,
		.set_mdio	= ravb_bb_set_mdio,
		.get_mdio	= ravb_bb_get_mdio,
		.set_mdc	= ravb_bb_set_mdc,
		.delay		= ravb_bb_delay,
	},
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start			= ravb_start,
	.send			= ravb_send,
	.recv			= ravb_recv,
	.free_pkt		= ravb_free_pkt,
	.stop			= ravb_stop,
	.write_hwaddr		= ravb_write_hwaddr,
};

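/*
 * The platform data below is read from the device tree. A minimal node,
 * with illustrative (not board-accurate) values, might look like:
 *
 *	avb: ethernet@e6800000 {
 *		compatible = "renesas,etheravb-r8a7795",
 *			     "renesas,etheravb-rcar-gen3";
 *		reg = <0 0xe6800000 0 0x800>;
 *		clocks = <&cpg CPG_MOD 812>;
 *		phy-mode = "rgmii-txid";
 *		phy-handle = <&phy0>;
 *		max-speed = <1000>;
 *
 *		phy0: ethernet-phy@0 {
 *			reg = <0>;
 *			reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
 *		};
 *	};
 */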
int ravb_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, "%s", dev->name);

	return ret;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-r8a7795" },
	{ .compatible = "renesas,etheravb-r8a7796" },
	{ .compatible = "renesas,etheravb-r8a77965" },
	{ .compatible = "renesas,etheravb-r8a77970" },
	{ .compatible = "renesas,etheravb-r8a77990" },
	{ .compatible = "renesas,etheravb-r8a77995" },
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name			= "ravb",
	.id			= UCLASS_ETH,
	.of_match		= ravb_ids,
	.ofdata_to_platdata	= ravb_ofdata_to_platdata,
	.probe			= ravb_probe,
	.remove			= ravb_remove,
	.ops			= &ravb_ops,
	.priv_auto_alloc_size	= sizeof(struct ravb_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags			= DM_FLAG_ALLOC_PRIV_DMA,
};