// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <net.h>
#include <pci.h>
#include <reset.h>
#include <asm/cache.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <power/regulator.h>
#include "designware.h"

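/*
 * Read a PHY register through the GMAC's MDIO interface: program the MII
 * address register with the PHY and register numbers, set the busy bit and
 * poll until the controller clears it or the timeout expires.
 */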
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -ETIMEDOUT;
}

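/*
 * Write a PHY register through the GMAC's MDIO interface: load the data
 * register, program the MII address register with the write flag set and
 * poll the busy bit until the transaction completes or times out.
 */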
static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
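/*
 * Pulse the optional PHY reset GPIO described by the "snps,reset-gpio"
 * property, honouring the three "snps,reset-delays-us" intervals around
 * the deassert/assert/deassert sequence.
 */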
static int __dw_mdio_reset(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_plat(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* reset the phy */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[2]);

	return 0;
}

static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;

	return __dw_mdio_reset(dev);
}
#endif

#if IS_ENABLED(CONFIG_DM_MDIO)
int designware_eth_mdio_read(struct udevice *mdio_dev, int addr, int devad, int reg)
{
	struct mdio_perdev_priv *pdata = dev_get_uclass_priv(mdio_dev);

	return dw_mdio_read(pdata->mii_bus, addr, devad, reg);
}

int designware_eth_mdio_write(struct udevice *mdio_dev, int addr, int devad, int reg, u16 val)
{
	struct mdio_perdev_priv *pdata = dev_get_uclass_priv(mdio_dev);

	return dw_mdio_write(pdata->mii_bus, addr, devad, reg, val);
}

#if CONFIG_IS_ENABLED(DM_GPIO)
int designware_eth_mdio_reset(struct udevice *mdio_dev)
{
	struct mdio_perdev_priv *mdio_pdata = dev_get_uclass_priv(mdio_dev);
	struct udevice *dev = mdio_pdata->mii_bus->priv;

	return __dw_mdio_reset(dev->parent);
}
#endif

static const struct mdio_ops designware_eth_mdio_ops = {
	.read = designware_eth_mdio_read,
	.write = designware_eth_mdio_write,
#if CONFIG_IS_ENABLED(DM_GPIO)
	.reset = designware_eth_mdio_reset,
#endif
};

static int designware_eth_mdio_probe(struct udevice *dev)
{
	/* Use the priv data of parent */
	dev_set_priv(dev, dev_get_priv(dev->parent));

	return 0;
}

U_BOOT_DRIVER(designware_eth_mdio) = {
	.name = "eth_designware_mdio",
	.id = UCLASS_MDIO,
	.probe = designware_eth_mdio_probe,
	.ops = &designware_eth_mdio_ops,
	.plat_auto = sizeof(struct mdio_perdev_priv),
};
#endif

static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}

#if IS_ENABLED(CONFIG_DM_MDIO)
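/*
 * Bind the dedicated MDIO driver to an "mdio" subnode of the MAC node if
 * one exists; otherwise fall back to registering a legacy MDIO bus.
 */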
static int dw_dm_mdio_init(const char *name, void *priv)
{
	struct udevice *dev = priv;
	ofnode node;
	int ret;

	ofnode_for_each_subnode(node, dev_ofnode(dev)) {
		const char *subnode_name = ofnode_get_name(node);
		struct udevice *mdiodev;

		if (strcmp(subnode_name, "mdio"))
			continue;

		ret = device_bind_driver_to_node(dev, "eth_designware_mdio",
						 subnode_name, node, &mdiodev);
		if (ret)
			debug("%s: not able to bind mdio device node\n", __func__);

		return 0;
	}

	printf("%s: mdio node is missing, registering legacy mdio bus\n", __func__);

	return dw_mdio_init(name, priv);
}
#endif

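/*
 * Set up the Tx descriptor ring: point each descriptor at its buffer, chain
 * the descriptors into a ring and hand the list base address to the DMA
 * engine. All descriptors are flushed to memory in one go at the end.
 */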
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((ulong)priv->tx_mac_descrtable,
			   (ulong)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

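/*
 * Set up the Rx descriptor ring: flush the freshly zeroed receive buffers,
 * chain the descriptors, hand ownership of each one to the DMA engine and
 * program the list base address.
 */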
static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC we need to make sure that the
	 * zeros written there right after the "priv" structure allocation
	 * have been flushed to RAM. Otherwise some of them may only reach
	 * RAM while the GMAC is already writing received data there via DMA,
	 * corrupting the incoming frames.
	 */
	flush_dcache_range((ulong)rxbuffs, (ulong)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((ulong)priv->rx_mac_descrtable,
			   (ulong)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

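/* Program the MAC address into the hardware address-0 register pair. */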
static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

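/*
 * Propagate the negotiated link parameters (speed, duplex) from the PHY
 * into the MAC configuration register and report the result.
 */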
static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

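/*
 * Bring the MAC out of soft reset, restore the MAC address, initialise the
 * descriptor rings, configure the DMA bus and operation modes and start the
 * PHY. Shared by both the legacy and driver-model code paths.
 */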
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	/*
	 * When a MII PHY is used, we must set the PS bit for the DMA
	 * reset to succeed.
	 */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_MII)
		writel(readl(&mac_p->conf) | MII_PORTSELECT, &mac_p->conf);
	else
		writel(readl(&mac_p->conf) & ~MII_PORTSELECT, &mac_p->conf);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	}

	/*
	 * Soft reset above clears HW address registers.
	 * So we have to set it here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

#define ETH_ZLEN	60

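/*
 * Queue one frame for transmission: copy it into the current Tx buffer,
 * pad short frames to the minimum Ethernet length, hand the descriptor
 * back to the DMA engine and kick the transmit poll demand register.
 */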
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);
	if (length < ETH_ZLEN) {
		memset(&((char *)data_start)[length], 0, ETH_ZLEN - length);
		length = ETH_ZLEN;
	}

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK);

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			      DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

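/*
 * Check the current Rx descriptor: if the DMA engine has handed it back to
 * the CPU, invalidate the received data and return its length and buffer,
 * otherwise return -EAGAIN to signal that nothing has arrived yet.
 */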
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {

		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(ulong)desc_p->dmamac_addr;
	}

	return length;
}

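/*
 * Return the current Rx descriptor to the DMA engine once the packet has
 * been processed and advance to the next descriptor in the ring.
 */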
static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush only status field - others weren't changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

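/*
 * Connect to the PHY (via DM_MDIO when available, otherwise through the
 * legacy mii bus), restrict the advertised features to the supported
 * gigabit set and the optional "max-speed" limit, then configure it.
 */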
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int ret;

#if IS_ENABLED(CONFIG_DM_MDIO) && IS_ENABLED(CONFIG_DM_ETH)
	phydev = dm_eth_phy_connect(dev);
	if (!phydev)
		return -ENODEV;
#else
	int phy_addr = -1;

#ifdef CONFIG_PHY_ADDR
	phy_addr = CONFIG_PHY_ADDR;
#endif

	phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;
#endif

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int dw_eth_init(struct eth_device *dev, struct bd_info *bis)
{
	int ret;

	ret = designware_eth_init(dev->priv, dev->enetaddr);
	if (!ret)
		ret = designware_eth_enable(dev->priv);

	return ret;
}

static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	return _dw_eth_send(dev->priv, packet, length);
}

static int dw_eth_recv(struct eth_device *dev)
{
	uchar *packet;
	int length;

	length = _dw_eth_recv(dev->priv, &packet);
	if (length == -EAGAIN)
		return 0;
	net_process_received_packet(packet, length);

	_dw_free_pkt(dev->priv);

	return 0;
}

static void dw_eth_halt(struct eth_device *dev)
{
	return _dw_eth_halt(dev->priv);
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	return _dw_write_hwaddr(dev->priv, dev->enetaddr);
}

int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a strict
	 * buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	if ((phys_addr_t)priv + sizeof(*priv) > (1ULL << 32)) {
		printf("designware: buffers are outside DMA memory\n");
		return -EINVAL;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(priv, dev);
}
#endif

#ifdef CONFIG_DM_ETH
static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

static int designware_eth_bind(struct udevice *dev)
{
#ifdef CONFIG_DM_PCI
	static int num_cards;
	char name[20];

	/* Create a unique device name for PCI type devices */
	if (device_is_on_pci_bus(dev)) {
		sprintf(name, "eth_designware#%u", num_cards++);
		device_set_name(dev, name);
	}
#endif

	return 0;
}

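/*
 * Probe the driver-model device: enable clocks and the optional PHY supply
 * regulator, deassert resets, resolve the register base (including the PCI
 * case), register the MDIO bus and connect to the PHY. On failure the MDIO
 * bus and clocks are released again.
 */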
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	ulong ioaddr;
	int ret, err;
	struct reset_ctl_bulk reset_bulk;
#ifdef CONFIG_CLK
	int i, clock_nb;

	priv->clock_count = 0;
	clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells",
					       0);
	if (clock_nb > 0) {
		priv->clocks = devm_kcalloc(dev, clock_nb, sizeof(struct clk),
					    GFP_KERNEL);
		if (!priv->clocks)
			return -ENOMEM;

		for (i = 0; i < clock_nb; i++) {
			err = clk_get_by_index(dev, i, &priv->clocks[i]);
			if (err < 0)
				break;

			err = clk_enable(&priv->clocks[i]);
			if (err && err != -ENOSYS && err != -ENOTSUPP) {
				pr_err("failed to enable clock %d\n", i);
				clk_free(&priv->clocks[i]);
				goto clk_err;
			}
			priv->clock_count++;
		}
	} else if (clock_nb != -ENOENT) {
		pr_err("failed to get clock phandle(%d)\n", clock_nb);
		return clock_nb;
	}
#endif

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

	ret = reset_get_bulk(dev, &reset_bulk);
	if (ret)
		dev_warn(dev, "Can't get reset: %d\n", ret);
	else
		reset_deassert_bulk(&reset_bulk);

#ifdef CONFIG_DM_PCI
	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in plat before we probe the hardware.
	 */
	if (device_is_on_pci_bus(dev)) {
		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = dm_pci_mem_to_phys(dev, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}
#endif

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	ioaddr = iobase;
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

#if IS_ENABLED(CONFIG_DM_MDIO)
	ret = dw_dm_mdio_init(dev->name, dev);
#else
	ret = dw_mdio_init(dev->name, dev);
#endif
	if (ret) {
		err = ret;
		goto mdio_err;
	}
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);
	if (!ret)
		return 0;

	/* continue here for cleanup if no PHY found */
	err = ret;
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
mdio_err:

#ifdef CONFIG_CLK
clk_err:
	ret = clk_release_all(priv->clocks, priv->clock_count);
	if (ret)
		pr_err("failed to disable all clocks\n");

#endif
	return err;
}

static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

#ifdef CONFIG_CLK
	return clk_release_all(priv->clocks, priv->clock_count);
#else
	return 0;
#endif
}

const struct eth_ops designware_eth_ops = {
	.start = designware_eth_start,
	.send = designware_eth_send,
	.recv = designware_eth_recv,
	.free_pkt = designware_eth_free_pkt,
	.stop = designware_eth_stop,
	.write_hwaddr = designware_eth_write_hwaddr,
};

int designware_eth_of_to_plat(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_plat(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
	const char *phy_mode;
#if CONFIG_IS_ENABLED(DM_GPIO)
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
				   &priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ .compatible = "snps,arc-dwmac-3.70a" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name = "eth_designware",
	.id = UCLASS_ETH,
	.of_match = designware_eth_ids,
	.of_to_plat = designware_eth_of_to_plat,
	.bind = designware_eth_bind,
	.probe = designware_eth_probe,
	.remove = designware_eth_remove,
	.ops = &designware_eth_ops,
	.priv_auto = sizeof(struct dw_eth_dev),
	.plat_auto = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);
#endif