/*
 * (C) Copyright 2010
 * Vipin Kumar, ST Micoelectronics, vipin.kumar@st.com.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <miiphy.h>
#include <malloc.h>
#include <pci.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <asm/io.h>
#include "designware.h"

DECLARE_GLOBAL_DATA_PTR;

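/*
 * Read a PHY register via the MAC's MDIO interface. Returns the register
 * value, or -ETIMEDOUT if MII_BUSY does not clear within CONFIG_MDIO_TIMEOUT.
 */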
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int timeout = CONFIG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -ETIMEDOUT;
}

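/*
 * Write a PHY register via the MAC's MDIO interface. Returns 0 on success,
 * or -ETIMEDOUT if MII_BUSY does not clear within CONFIG_MDIO_TIMEOUT.
 */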
static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
	struct eth_mac_regs *mac_p = bus->priv;
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CONFIG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

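/*
 * Allocate and register an MDIO bus named after the ethernet device, using
 * the MAC register block as the bus private data.
 */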
static int dw_mdio_init(const char *name, struct eth_mac_regs *mac_regs_p)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);

	bus->priv = (void *)mac_regs_p;

	return mdio_register(bus);
}

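/*
 * Set up the TX descriptor ring: point each descriptor at its buffer, chain
 * it to the next one, wrap the last entry back to the first, flush the table
 * and program the DMA TX descriptor list address.
 */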
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CONFIG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &txbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->tx_mac_descrtable,
			   (unsigned int)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

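/*
 * Set up the RX descriptor ring: chain the descriptors, hand each buffer to
 * the DMA engine (DESC_RXSTS_OWNBYDMA), wrap the ring, flush the table and
 * program the DMA RX descriptor list address.
 */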
static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before passing buffers to the GMAC we need to make sure that the
	 * zeros written there right after the "priv" structure was allocated
	 * have actually been flushed to RAM. Otherwise some of them could be
	 * evicted to RAM later, while the GMAC is already writing received
	 * data there via DMA, and the incoming data would be corrupted.
	 */
	flush_dcache_range((unsigned int)rxbuffs, (unsigned int)rxbuffs +
			   RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CONFIG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = &rxbuffs[idx * CONFIG_ETH_BUFSIZE];
		desc_p->dmamac_next = &desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correcting the last pointer of the chain */
	desc_p->dmamac_next = &desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((unsigned int)priv->rx_mac_descrtable,
			   (unsigned int)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

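/* Program the 6-byte MAC address into the MAC address 0 register pair. */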
static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

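/*
 * Propagate the PHY's negotiated speed and duplex into the MAC configuration
 * register and report the link parameters.
 */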
static void dw_adjust_link(struct eth_mac_regs *mac_p,
			   struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
}

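/*
 * Disable the MAC receiver/transmitter, stop the DMA engine and shut down
 * the PHY.
 */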
static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

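/*
 * Bring the interface up: soft-reset the DMA engine, reprogram the MAC
 * address, initialise both descriptor rings, configure the DMA/FIFO modes,
 * start the PHY and finally enable the receiver and transmitter.
 */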
static int _dw_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CONFIG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	}

	/*
	 * The soft reset above clears the HW address registers,
	 * so the MAC address has to be programmed here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	dw_adjust_link(mac_p, priv->phydev);

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

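/*
 * Queue one frame on the current TX descriptor: copy the packet into the
 * descriptor buffer, hand the descriptor back to the DMA engine and kick the
 * TX poll demand register. Returns -EPERM if the descriptor is still owned
 * by the DMA engine.
 */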
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end = data_start +
		roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly we only need to invalidate the "txrx_status" field
	 * for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the
	 * individual descriptors in the array are each aligned to
	 * ARCH_DMA_MINALIGN and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy(desc_p->dmamac_addr, packet, length);

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl |= (length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK;

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl |= ((length << DESC_TXCTRL_SIZE1SHFT) &
			       DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			       DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

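/*
 * Check the current RX descriptor: if the DMA engine has released it, return
 * the frame length and a pointer to the (cache-invalidated) data, otherwise
 * return -EAGAIN.
 */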
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	uint32_t data_start = (uint32_t)desc_p->dmamac_addr;
	uint32_t data_end;

	/* Invalidate entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = desc_p->dmamac_addr;
	}

	return length;
}

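/*
 * Hand the current RX descriptor back to the DMA engine and advance to the
 * next one in the ring.
 */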
static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	uint32_t desc_start = (uint32_t)desc_p;
	uint32_t desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush the descriptor back to RAM - only the status field changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CONFIG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

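/*
 * Find the PHY on the MDIO bus (optionally limited to CONFIG_PHY_ADDR),
 * connect it to the ethernet device, restrict the advertised features to
 * gigabit and below (and to priv->max_speed if set) and configure it.
 */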
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int mask = 0xffffffff, ret;

#ifdef CONFIG_PHY_ADDR
	mask = 1 << CONFIG_PHY_ADDR;
#endif

	phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int dw_eth_init(struct eth_device *dev, bd_t *bis)
{
	return _dw_eth_init(dev->priv, dev->enetaddr);
}

static int dw_eth_send(struct eth_device *dev, void *packet, int length)
{
	return _dw_eth_send(dev->priv, packet, length);
}

static int dw_eth_recv(struct eth_device *dev)
{
	uchar *packet;
	int length;

	length = _dw_eth_recv(dev->priv, &packet);
	if (length == -EAGAIN)
		return 0;
	net_process_received_packet(packet, length);

	_dw_free_pkt(dev->priv);

	return 0;
}

static void dw_eth_halt(struct eth_device *dev)
{
	return _dw_eth_halt(dev->priv);
}

static int dw_write_hwaddr(struct eth_device *dev)
{
	return _dw_write_hwaddr(dev->priv, dev->enetaddr);
}

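/*
 * Legacy (non-driver-model) entry point: allocate and register the ethernet
 * device, set up the MDIO bus and initialise the PHY.
 */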
int designware_initialize(ulong base_addr, u32 interface)
{
	struct eth_device *dev;
	struct dw_eth_dev *priv;

	dev = (struct eth_device *) malloc(sizeof(struct eth_device));
	if (!dev)
		return -ENOMEM;

	/*
	 * Since the priv structure contains the descriptors which need a strict
	 * buswidth alignment, memalign is used to allocate memory
	 */
	priv = (struct dw_eth_dev *) memalign(ARCH_DMA_MINALIGN,
					      sizeof(struct dw_eth_dev));
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	memset(dev, 0, sizeof(struct eth_device));
	memset(priv, 0, sizeof(struct dw_eth_dev));

	sprintf(dev->name, "dwmac.%lx", base_addr);
	dev->iobase = (int)base_addr;
	dev->priv = priv;

	priv->dev = dev;
	priv->mac_regs_p = (struct eth_mac_regs *)base_addr;
	priv->dma_regs_p = (struct eth_dma_regs *)(base_addr +
			DW_DMA_BASE_OFFSET);

	dev->init = dw_eth_init;
	dev->send = dw_eth_send;
	dev->recv = dw_eth_recv;
	dev->halt = dw_eth_halt;
	dev->write_hwaddr = dw_write_hwaddr;

	eth_register(dev);

	priv->interface = interface;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	return dw_phy_init(priv, dev);
}
#endif

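/*
 * Driver-model glue: the eth_ops callbacks below map onto the common
 * helpers above.
 */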
#ifdef CONFIG_DM_ETH
static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	return _dw_eth_init(dev->priv, pdata->enetaddr);
}

static int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

static int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

static int designware_eth_free_pkt(struct udevice *dev, uchar *packet,
				   int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

static void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

static int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

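/*
 * On PCI, several instances may bind against this driver, so give each
 * device a unique name before it is probed.
 */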
static int designware_eth_bind(struct udevice *dev)
{
#ifdef CONFIG_DM_PCI
	static int num_cards;
	char name[20];

	/* Create a unique device name for PCI type devices */
	if (device_is_on_pci_bus(dev)) {
		sprintf(name, "eth_designware#%u", num_cards++);
		device_set_name(dev, name);
	}
#endif

	return 0;
}

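/*
 * Probe the device: for PCI instances read the register base from BAR0 and
 * force RMII first, then map the MAC/DMA register blocks, set up the MDIO
 * bus and initialise the PHY.
 */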
static int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	int ret;

#ifdef CONFIG_DM_PCI
	/*
	 * If we are on PCI bus, either directly attached to a PCI root port,
	 * or via a PCI bridge, fill in platdata before we probe the hardware.
	 */
	if (device_is_on_pci_bus(dev)) {
		pci_dev_t bdf = dm_pci_get_bdf(dev);

		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = pci_mem_to_phys(bdf, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}
#endif

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	priv->mac_regs_p = (struct eth_mac_regs *)iobase;
	priv->dma_regs_p = (struct eth_dma_regs *)(iobase + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

	dw_mdio_init(dev->name, priv->mac_regs_p);
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);

	return ret;
}

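/* Undo probe: release the PHY device and tear down the MDIO bus. */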
static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops designware_eth_ops = {
	.start			= designware_eth_start,
	.send			= designware_eth_send,
	.recv			= designware_eth_recv,
	.free_pkt		= designware_eth_free_pkt,
	.stop			= designware_eth_stop,
	.write_hwaddr		= designware_eth_write_hwaddr,
};

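/*
 * Decode the device tree node: register base, mandatory "phy-mode" and
 * optional "max-speed" properties.
 */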
static int designware_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;

	pdata->iobase = dev_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev->of_offset, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 0;
	cell = fdt_getprop(gd->fdt_blob, dev->of_offset, "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	return 0;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "altr,socfpga-stmmac" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name	= "eth_designware",
	.id	= UCLASS_ETH,
	.of_match = designware_eth_ids,
	.ofdata_to_platdata = designware_eth_ofdata_to_platdata,
	.bind	= designware_eth_bind,
	.probe	= designware_eth_probe,
	.remove	= designware_eth_remove,
	.ops	= &designware_eth_ops,
	.priv_auto_alloc_size = sizeof(struct dw_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);
#endif