// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <pci.h>
#include <reset.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <power/regulator.h>
#include "designware.h"

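/*
 * MDIO read helper: program the MII address register, start the transaction
 * by setting MII_BUSY and poll until the busy bit clears, then return the
 * value latched in the MII data register.
 */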
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
#ifdef CONFIG_DM_ETH
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
#else
	struct eth_mac_regs *mac_p = bus->priv;
#endif
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

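/*
 * Optional PHY reset hook for the MDIO bus: toggles the "snps,reset-gpio"
 * line through a 0-1-0 sequence, waiting between transitions for the three
 * delays read from "snps,reset-delays-us".
 */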
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_platdata(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* reset the phy */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[2]);

	return 0;
}
#endif

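/*
 * Register an MDIO bus named after the device; @priv is the udevice in the
 * DM_ETH case and the MAC register base otherwise (see dw_mdio_read()).
 */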
static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if defined(CONFIG_DM_ETH) && CONFIG_IS_ENABLED(DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}

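/*
 * Build the chained TX descriptor ring: point each descriptor at its buffer
 * and at the next descriptor, flush the table to RAM and tell the DMA engine
 * where the ring starts.
 */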
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((ulong)priv->tx_mac_descrtable,
			   (ulong)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

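/*
 * Build the chained RX descriptor ring and hand every descriptor to the DMA
 * engine (DESC_RXSTS_OWNBYDMA) so it can start filling the buffers.
 */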
static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC we need to make sure that the
	 * zeros written right after the "priv" structure allocation have
	 * been flushed to RAM. Otherwise some of them could be flushed to
	 * RAM later, while the GMAC is already writing received data there
	 * via DMA, corrupting the incoming data.
	 */
	flush_dcache_range((ulong)rxbuffs, (ulong)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((ulong)priv->rx_mac_descrtable,
			   (ulong)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

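/*
 * Mirror the link parameters negotiated by the PHY (speed, duplex) into the
 * MAC configuration register.
 */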
static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

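/*
 * Core hardware init: soft-reset the DMA engine, reprogram the MAC address,
 * set up both descriptor rings, start the DMA, then bring up the PHY and
 * adjust the MAC to the resulting link.
 */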
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	/*
	 * When a MII PHY is used, we must set the PS bit for the DMA
	 * reset to succeed.
	 */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_MII)
		writel(readl(&mac_p->conf) | MII_PORTSELECT, &mac_p->conf);
	else
		writel(readl(&mac_p->conf) & ~MII_PORTSELECT, &mac_p->conf);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	}

	/*
	 * The soft reset above clears the HW address registers, so we
	 * have to set them here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

#define ETH_ZLEN	60

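/*
 * Copy the frame into the current TX descriptor's buffer (padding short
 * frames to ETH_ZLEN), hand the descriptor back to the DMA engine and kick
 * transmission via the poll-demand register.
 */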
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);
	if (length < ETH_ZLEN) {
		memset(&((char *)data_start)[length], 0, ETH_ZLEN - length);
		length = ETH_ZLEN;
	}

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK);

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			      DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

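/*
 * If the current RX descriptor has been released by the DMA engine, return
 * the received frame's length and a pointer to its buffer; otherwise return
 * -EAGAIN so the caller retries later.
 */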
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(ulong)desc_p->dmamac_addr;
	}

	return length;
}

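/* Give the current RX descriptor back to the DMA engine and advance. */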
static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush only status field - others weren't changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

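/*
 * Connect to the PHY (fixed CONFIG_PHY_ADDR if set, otherwise probe the
 * bus), restrict it to the supported/maximum speed and configure it.
 */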
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int phy_addr = -1, ret;

#ifdef CONFIG_PHY_ADDR
	phy_addr = CONFIG_PHY_ADDR;
#endif

	phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	int ret;

	ret = designware_eth_init(dev->priv, dev->enetaddr);
	if (!ret)
		ret = designware_eth_enable(dev->priv);

	return ret;
}

static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	return _dw_eth_send(dev->priv, packet, length);
}

static int dw_eth_recv(struct eth_device *dev)
{
	uchar *packet;
	int length;

	length = _dw_eth_recv(dev->priv, &packet);
	if (length == -EAGAIN)
		return 0;
	net_process_received_packet(packet, length);

	_dw_free_pkt(dev->priv);

	return 0;
}

static void dw_eth_halt(struct eth_device *dev)
{
	return _dw_eth_halt(dev->priv);
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	return _dw_write_hwaddr(dev->priv, dev->enetaddr);
}

int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a strict
	 * buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	if ((phys_addr_t)priv + sizeof(*priv) > (1ULL << 32)) {
		printf("designware: buffers are outside DMA memory\n");
		free(priv);
		free(dev);
		return -EINVAL;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(priv, dev);
}
#endif

#ifdef CONFIG_DM_ETH
static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

static int designware_eth_bind(struct udevice *dev)
{
#ifdef CONFIG_DM_PCI
	static int num_cards;
	char name[20];

	/* Create a unique device name for PCI type devices */
	if (device_is_on_pci_bus(dev)) {
		sprintf(name, "eth_designware#%u", num_cards++);
		device_set_name(dev, name);
	}
#endif

	return 0;
}

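/*
 * Driver model probe: enable clocks, the optional PHY regulator and resets,
 * pick up the register base (from PCI config space when on a PCI bus), then
 * register the MDIO bus and connect to the PHY.
 */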
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	ulong ioaddr;
	int ret, err;
	struct reset_ctl_bulk reset_bulk;
#ifdef CONFIG_CLK
	int i, clock_nb;

	priv->clock_count = 0;
	clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
	if (clock_nb > 0) {
		priv->clocks = devm_kcalloc(dev, clock_nb, sizeof(struct clk),
					    GFP_KERNEL);
		if (!priv->clocks)
			return -ENOMEM;

		for (i = 0; i < clock_nb; i++) {
			err = clk_get_by_index(dev, i, &priv->clocks[i]);
			if (err < 0)
				break;

			err = clk_enable(&priv->clocks[i]);
			if (err && err != -ENOSYS && err != -ENOTSUPP) {
				pr_err("failed to enable clock %d\n", i);
				clk_free(&priv->clocks[i]);
				goto clk_err;
			}
			priv->clock_count++;
		}
	} else if (clock_nb != -ENOENT) {
		pr_err("failed to get clock phandle(%d)\n", clock_nb);
		return clock_nb;
	}
#endif

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

	ret = reset_get_bulk(dev, &reset_bulk);
	if (ret)
		dev_warn(dev, "Can't get reset: %d\n", ret);
	else
		reset_deassert_bulk(&reset_bulk);

#ifdef CONFIG_DM_PCI
	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in platdata before we probe the hardware.
	 */
	if (device_is_on_pci_bus(dev)) {
		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = dm_pci_mem_to_phys(dev, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}
#endif

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	ioaddr = iobase;
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

	ret = dw_mdio_init(dev->name, dev);
	if (ret) {
		err = ret;
		goto mdio_err;
	}
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);
	if (!ret)
		return 0;

	/* continue here for cleanup if no PHY found */
	err = ret;
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
mdio_err:

#ifdef CONFIG_CLK
clk_err:
	ret = clk_release_all(priv->clocks, priv->clock_count);
	if (ret)
		pr_err("failed to disable all clocks\n");

#endif
	return err;
}

static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

#ifdef CONFIG_CLK
	return clk_release_all(priv->clocks, priv->clock_count);
#else
	return 0;
#endif
}

const struct eth_ops designware_eth_ops = {
	.start			= designware_eth_start,
	.send			= designware_eth_send,
	.recv			= designware_eth_recv,
	.free_pkt		= designware_eth_free_pkt,
	.stop			= designware_eth_stop,
	.write_hwaddr		= designware_eth_write_hwaddr,
};

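/*
 * Parse the device tree node: register base, "phy-mode", optional
 * "max-speed" and, when GPIO support is enabled, the "snps,reset-gpio"
 * properties used by dw_mdio_reset().
 */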
int designware_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_platdata(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
	const char *phy_mode;
#if CONFIG_IS_ENABLED(DM_GPIO)
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = dev_read_string(dev, "phy-mode");
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
				   &priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "amlogic,meson-gx-dwmac" },
	{ .compatible = "amlogic,meson-gxbb-dwmac" },
	{ .compatible = "amlogic,meson-axg-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ .compatible = "snps,arc-dwmac-3.70a" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name	= "eth_designware",
	.id	= UCLASS_ETH,
	.of_match = designware_eth_ids,
	.ofdata_to_platdata = designware_eth_ofdata_to_platdata,
	.bind	= designware_eth_bind,
	.probe	= designware_eth_probe,
	.remove	= designware_eth_remove,
	.ops	= &designware_eth_ops,
	.priv_auto_alloc_size = sizeof(struct dw_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);
#endif