/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <clk.h>
#include <dm.h>

/*
 * The U-Boot networking stack is a little weird: the networking core
 * allocates receive buffers up front without any regard to the
 * hardware that's supposed to actually receive those packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use. We'll
 * allocate our own, but we still need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header. This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding"). MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */
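
/*
 * A minimal sketch of the board-side half of the contract described
 * above (the config name is the real one; the value shown is simply the
 * recommended setting, not copied from any particular board header):
 *
 *	#define CONFIG_SYS_RX_ETH_BUFFER	1
 *
 * With that in place the core keeps a single RX/TX packet buffer and
 * this driver's own 128-byte RX buffers do the actual receiving.
 */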

#include <net.h>
#ifndef CONFIG_DM_ETH
#include <netdev.h>
#endif
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>
#include <linux/errno.h>

#include "macb.h"

DECLARE_GLOBAL_DATA_PTR;

#define MACB_RX_BUFFER_SIZE		4096
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
#define MACB_TX_TIMEOUT			1000
#define MACB_AUTONEG_TIMEOUT		5000000

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

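/*
 * A short note on how the two descriptor words above are used (this
 * summarises the driver's own usage below, not every bit the hardware
 * defines): on RX the low bits of 'addr' carry RXADDR_USED (the
 * controller sets it once it has written a frame, software clears it to
 * hand the slot back) and RXADDR_WRAP (marks the last slot of the
 * ring), while 'ctrl' holds the frame length plus the FRAME_START/END
 * flags. On TX the roles flip: 'addr' is just the buffer address and
 * 'ctrl' carries the length, the TXBUF_USED ownership bit and
 * TXBUF_WRAP.
 */
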
struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;
	bool			wrapped;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
#ifndef CONFIG_DM_ETH
	struct eth_device	netdev;
#endif
	unsigned short		phy_addr;
	struct mii_dev		*bus;

#ifdef CONFIG_DM_ETH
	unsigned long		pclk_rate;
	phy_interface_t		phy_interface;
#endif
};
#ifndef CONFIG_DM_ETH
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)
#endif

static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif

static int gem_is_gigabit_capable(struct macb_device *macb)
{
	/*
	 * The GEM controllers embedded in SAMA5D2 and SAMA5D4 are
	 * configured to support only 10/100.
	 */
	return macb_is_gem(macb) && !cpu_is_sama5d2() && !cpu_is_sama5d4();
}

static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}
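
/*
 * The MAN register writes above build a standard IEEE 802.3 Clause 22
 * management frame: SOF=1, RW=1 for a write or RW=2 for a read, the
 * 5-bit PHY and register addresses, CODE=2 as the turnaround pattern,
 * and (for writes) the 16-bit data. As an illustration only, reading
 * the basic status register of the PHY at the configured address looks
 * like:
 *
 *	u16 bmsr = macb_mdio_read(macb, MII_BMSR);
 *
 * which is exactly what the autonegotiation code further down does.
 */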

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

int macb_miiphy_read(struct mii_dev *bus, int phy_adr, int devad, int reg)
{
	u16 value = 0;
#ifdef CONFIG_DM_ETH
	struct udevice *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = dev_get_priv(dev);
#else
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = to_macb(dev);
#endif

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(bus->name);
	value = macb_mdio_read(macb, reg);

	return value;
}

int macb_miiphy_write(struct mii_dev *bus, int phy_adr, int devad, int reg,
		      u16 value)
{
#ifdef CONFIG_DM_ETH
	struct udevice *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = dev_get_priv(dev);
#else
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct macb_device *macb = to_macb(dev);
#endif

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(bus->name);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif

#define RX	1
#define TX	0
static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		invalidate_dcache_range(macb->rx_ring_dma,
			ALIGN(macb->rx_ring_dma + MACB_RX_DMA_DESC_SIZE,
			      PKTALIGN));
	else
		invalidate_dcache_range(macb->tx_ring_dma,
			ALIGN(macb->tx_ring_dma + MACB_TX_DMA_DESC_SIZE,
			      PKTALIGN));
}

static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
{
	if (rx)
		flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
				   ALIGN(MACB_RX_DMA_DESC_SIZE, PKTALIGN));
	else
		flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
				   ALIGN(MACB_TX_DMA_DESC_SIZE, PKTALIGN));
}

static inline void macb_flush_rx_buffer(struct macb_device *macb)
{
	flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
			   ALIGN(MACB_RX_BUFFER_SIZE, PKTALIGN));
}

static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
{
	invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
				ALIGN(MACB_RX_BUFFER_SIZE, PKTALIGN));
}
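
/*
 * Why both flush and invalidate: the descriptor rings and the RX buffer
 * live in normal (cacheable) memory while the controller accesses them
 * by DMA. Before the hardware is allowed to read something the CPU
 * wrote (descriptor setup, a frame to transmit) the cache is flushed,
 * and before the CPU reads something the hardware wrote (descriptor
 * status, a received frame) the stale cache lines are invalidated.
 * The ranges are padded out to PKTALIGN so the maintenance operations
 * never act on partial cache lines.
 */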

#if defined(CONFIG_CMD_NET)

static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need to check that paddr and length are dcache line aligned? */
	flush_dcache_range(paddr, paddr + ALIGN(length, ARCH_DMA_MINALIGN));
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}
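
/*
 * The handshake with the hardware in _macb_send() is purely through the
 * TXBUF_USED bit: software clears it (by rewriting ctrl) to hand the
 * descriptor to the controller, kicks TSTART, and then polls until the
 * controller sets USED again, meaning the frame (or an error) is done
 * and the caller's buffer may be reused. The barrier()/cache
 * maintenance calls around the poll keep the compiler and the dcache
 * from serving up a stale copy of the descriptor.
 */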

static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;

	macb_invalidate_ring_desc(macb, RX);
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb_flush_ring_desc(macb, RX);
	macb->rx_tail = new_tail;
}

static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;

	macb->wrapped = false;
	for (;;) {
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}
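
/*
 * Note on the "wrapped" path above: a frame larger than 128 bytes spans
 * several consecutive descriptors, and if those descriptors straddle the
 * end of the ring the frame ends up split between the tail and the head
 * of the single contiguous rx_buffer. That is the one case where the
 * driver falls back to the core's net_rx_packets[0] buffer and stitches
 * the two pieces together with memcpy() before handing the packet up.
 */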

static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}

#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb, const char *name)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

#ifdef CONFIG_DM_ETH
static int macb_phy_init(struct udevice *dev, const char *name)
#else
static int macb_phy_init(struct macb_device *macb, const char *name)
#endif
{
#ifdef CONFIG_DM_ETH
	struct macb_device *macb = dev_get_priv(dev);
#endif
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb, name))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
#ifdef CONFIG_DM_ETH
	phydev = phy_connect(macb->bus, macb->phy_addr, dev,
			     macb->phy_interface);
#else
	/* need to consider other phy interface modes */
	phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
			     PHY_INTERFACE_MODE_RGMII);
#endif
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb, name);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       name, status);
		return 0;
	}

	/* First check for GMAC and that it is gigabit capable */
	if (gem_is_gigabit_capable(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       name,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* fall back for EMAC checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	macb->dummy_desc->ctrl = TXBUF_USED;
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			   ALIGN(MACB_TX_DUMMY_DMA_DESC_SIZE, PKTALIGN));

	for (i = 1; i < num_queues; i++)
		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

	return 0;
}
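
/*
 * Worked example of the queue-mask decoding above (the DCFG6 value is
 * made up for illustration): if the low byte of DCFG6 reads 0x06, bits
 * 1 and 2 are set, so together with the always-present queue 0 the
 * controller has three queues. Queues 1 and 2 then each get their TBQP
 * pointed at the single "used" dummy descriptor so they stay idle while
 * U-Boot only ever transmits on queue 0.
 */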

#ifdef CONFIG_DM_ETH
static int _macb_init(struct udevice *dev, const char *name)
#else
static int _macb_init(struct macb_device *macb, const char *name)
#endif
{
#ifdef CONFIG_DM_ETH
	struct macb_device *macb = dev_get_priv(dev);
#endif
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/* Check the multi queue and initialize the queue for tx */
		gmac_init_multi_queues(macb);

		/*
		 * When the GMAC IP has the GE feature, this bit selects
		 * the interface between RGMII and GMII.
		 * When it doesn't have the GE feature, the same bit selects
		 * between RMII and MII.
		 */
#ifdef CONFIG_DM_ETH
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			gem_writel(macb, UR, GEM_BIT(RGMII));
		else
			gem_writel(macb, UR, 0);
#else
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_DM_ETH
#ifdef CONFIG_AT91FAMILY
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII) {
			macb_writel(macb, USRIO,
				    MACB_BIT(RMII) | MACB_BIT(CLKEN));
		} else {
			macb_writel(macb, USRIO, MACB_BIT(CLKEN));
		}
#else
		if (macb->phy_interface == PHY_INTERFACE_MODE_RMII)
			macb_writel(macb, USRIO, 0);
		else
			macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#else
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
#endif
	}

#ifdef CONFIG_DM_ETH
	if (!macb_phy_init(dev, name))
#else
	if (!macb_phy_init(macb, name))
#endif
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void _macb_halt(struct macb_device *macb)
{
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
{
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
			enetaddr[2] << 16 | enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}
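
/*
 * Example of the SA1B/SA1T packing above, using a made-up address: for
 * enetaddr = 00:11:22:33:44:55 the bottom register gets 0x33221100 and
 * the top register gets 0x5544, i.e. the address is packed byte 0
 * first, which matches the layout this driver assumes for the MACB's
 * specific-address registers.
 */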

static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
#ifdef CONFIG_DM_ETH
	unsigned long macb_hz = macb->pclk_rate;
#else
	unsigned long macb_hz = get_macb_pclk_rate(id);
#endif

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;

#ifdef CONFIG_DM_ETH
	unsigned long macb_hz = macb->pclk_rate;
#else
	unsigned long macb_hz = get_macb_pclk_rate(id);
#endif

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
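
/*
 * The divider tables above keep MDC at or below the usual 2.5 MHz MDIO
 * limit. As a worked example (the clock value is illustrative, not from
 * any particular SoC): with pclk = 133 MHz the GEM table picks DIV64,
 * giving an MDC of roughly 133 MHz / 64 ~= 2.08 MHz.
 */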

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb_device *macb)
{
	switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* TODO: we need to check that rx/tx_ring_dma is dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					      &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}

#ifndef CONFIG_DM_ETH
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_send(macb, netdev->name, packet, length);
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	uchar *packet;
	int length;

	macb->wrapped = false;
	for (;;) {
		macb->next_rx_tail = macb->rx_tail;
		length = _macb_recv(macb, &packet);
		if (length >= 0) {
			net_process_received_packet(packet, length);
			reclaim_rx_buffers(macb, macb->next_rx_tail);
		} else if (length < 0) {
			return length;
		}
	}
}

static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_init(macb, netdev->name);
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_halt(macb);
}

static int macb_write_hwaddr(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);

	return _macb_write_hwaddr(macb, netdev->enetaddr);
}

int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	_macb_eth_initialize(macb);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	int retval;
	struct mii_dev *mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, netdev->name, MDIO_NAME_LEN);
	mdiodev->read = macb_miiphy_read;
	mdiodev->write = macb_miiphy_write;

	retval = mdio_register(mdiodev);
	if (retval < 0)
		return retval;
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}
#endif /* !CONFIG_DM_ETH */

#ifdef CONFIG_DM_ETH

static int macb_start(struct udevice *dev)
{
	return _macb_init(dev, dev->name);
}

static int macb_send(struct udevice *dev, void *packet, int length)
{
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_send(macb, dev->name, packet, length);
}

static int macb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct macb_device *macb = dev_get_priv(dev);

	macb->next_rx_tail = macb->rx_tail;
	macb->wrapped = false;

	return _macb_recv(macb, packetp);
}

static int macb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct macb_device *macb = dev_get_priv(dev);

	reclaim_rx_buffers(macb, macb->next_rx_tail);

	return 0;
}

static void macb_stop(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);

	_macb_halt(macb);
}

static int macb_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_platdata(dev);
	struct macb_device *macb = dev_get_priv(dev);

	return _macb_write_hwaddr(macb, plat->enetaddr);
}

static const struct eth_ops macb_eth_ops = {
	.start	= macb_start,
	.send	= macb_send,
	.recv	= macb_recv,
	.stop	= macb_stop,
	.free_pkt	= macb_free_pkt,
	.write_hwaddr	= macb_write_hwaddr,
};

static int macb_enable_clk(struct udevice *dev)
{
	struct macb_device *macb = dev_get_priv(dev);
	struct clk clk;
	ulong clk_rate;
	int ret;

	ret = clk_get_by_index(dev, 0, &clk);
	if (ret)
		return -EINVAL;

	ret = clk_enable(&clk);
	if (ret)
		return ret;

	clk_rate = clk_get_rate(&clk);
	if (!clk_rate)
		return -EINVAL;

	macb->pclk_rate = clk_rate;

	return 0;
}

static int macb_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct macb_device *macb = dev_get_priv(dev);

#ifdef CONFIG_DM_ETH
	const char *phy_mode;
	int ret;

	phy_mode = fdt_getprop(gd->fdt_blob, dev->of_offset, "phy-mode", NULL);
	if (phy_mode)
		macb->phy_interface = phy_get_interface_by_name(phy_mode);
	if (macb->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
#endif

	macb->regs = (void *)pdata->iobase;

	ret = macb_enable_clk(dev);
	if (ret)
		return ret;

	_macb_eth_initialize(macb);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	int retval;
	struct mii_dev *mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = macb_miiphy_read;
	mdiodev->write = macb_miiphy_write;

	retval = mdio_register(mdiodev);
	if (retval < 0)
		return retval;
	macb->bus = miiphy_get_dev_by_name(dev->name);
#endif

	return 0;
}

static int macb_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	pdata->iobase = dev_get_addr(dev);
	return 0;
}

static const struct udevice_id macb_eth_ids[] = {
	{ .compatible = "cdns,macb" },
	{ }
};

U_BOOT_DRIVER(eth_macb) = {
	.name	= "eth_macb",
	.id	= UCLASS_ETH,
	.of_match = macb_eth_ids,
	.ofdata_to_platdata = macb_eth_ofdata_to_platdata,
	.probe	= macb_eth_probe,
	.ops	= &macb_eth_ops,
	.priv_auto_alloc_size = sizeof(struct macb_device),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};
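
/*
 * For reference, a minimal sketch of a device tree node this driver
 * would bind against (addresses, clock phandle and PHY mode are made up
 * for illustration, not taken from any real board):
 *
 *	macb0: ethernet@f8008000 {
 *		compatible = "cdns,macb";
 *		reg = <0xf8008000 0x1000>;
 *		clocks = <&macb0_clk>;
 *		phy-mode = "rmii";
 *	};
 *
 * "compatible" and "phy-mode" are the properties this file actually
 * looks at; the register base comes from dev_get_addr() and the clock
 * is consumed through clk_get_by_index() in macb_enable_clk().
 */
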
#endif /* CONFIG_DM_ETH */

#endif /* CONFIG_CMD_NET */