/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <common.h>

#if defined(CONFIG_MACB) \
	&& (defined(CONFIG_CMD_NET) || defined(CONFIG_CMD_MII))

/*
 * The u-boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we need one such buffer in case a packet
 * wraps around the DMA ring so that we have to copy it.
 *
 * Therefore, define CFG_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */
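/*
 * In other words, the board configuration header would typically carry
 * something like the following (header name and location vary per board):
 *
 *	#define CFG_RX_ETH_BUFFER	1
 */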

#include <net.h>
#include <malloc.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>

#include "macb.h"

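/*
 * Compiler-only barrier: keeps the compiler from reordering or caching
 * descriptor accesses around the points where the hardware is told
 * about them (no CPU memory barrier instruction is issued).
 */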
#define barrier() asm volatile("" ::: "memory")

#define CFG_MACB_RX_BUFFER_SIZE		4096
#define CFG_MACB_RX_RING_SIZE		(CFG_MACB_RX_BUFFER_SIZE / 128)
#define CFG_MACB_TX_RING_SIZE		16
#define CFG_MACB_TX_TIMEOUT		1000
#define CFG_MACB_AUTONEG_TIMEOUT	5000000

struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

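/*
 * Write a PHY register over the MDIO interface: enable the management
 * port (MPE in NCR), shift out a write frame through the MAN register,
 * busy-wait on NSR.IDLE for completion, then disable the management
 * port again.
 */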
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

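/*
 * Read a PHY register over the MDIO interface; same sequence as the
 * write above, but with the read opcode, and the result is taken from
 * the DATA field of the MAN register afterwards.
 */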
static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

#if defined(CONFIG_CMD_NET)

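/*
 * Queue one frame on the TX ring and start transmission: fill in the
 * descriptor at the current head (adding WRAP on the last slot), kick
 * the controller with TSTART, then poll the descriptor until the
 * controller hands it back (USED bit) or CFG_MACB_TX_TIMEOUT expires.
 */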
static int macb_send(struct eth_device *netdev, volatile void *packet,
		     int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (CFG_MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else
		macb->tx_head++;

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= CFG_MACB_TX_TIMEOUT; i++) {
		barrier();
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= CFG_MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares anyway */
	return 0;
}

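/*
 * Give the RX buffers between the old tail and new_tail back to the
 * controller by clearing their USED bits, then advance rx_tail.
 */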
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= CFG_MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb->rx_tail = new_tail;
}

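/*
 * Poll the RX ring for a completed frame.  A frame may span several
 * 128-byte buffers; if it wraps past the end of the ring, the two
 * halves are copied into NetRxPackets[0] so that NetReceive() sees a
 * contiguous packet.
 */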
static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (CFG_MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)NetRxPackets[0],
				       buffer, headlen);
				memcpy((void *)NetRxPackets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)NetRxPackets[0];
			}

			NetReceive(buffer, length);
			if (++rx_tail >= CFG_MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= CFG_MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}

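/*
 * Advertise all 10/100 modes and restart autonegotiation, waiting up
 * to CFG_MACB_AUTONEG_TIMEOUT microseconds for it to complete.
 */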
static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < CFG_MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

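/*
 * Probe the PHY, renegotiate if there is no link yet, and program the
 * MACB speed/duplex configuration from the autonegotiation result.
 * Returns 1 if the link is up, 0 otherwise.
 */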
static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < CFG_MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	} else {
		adv = macb_mdio_read(macb, MII_ADVERTISE);
		lpa = macb_mdio_read(macb, MII_LPA);
		media = mii_nway_result(lpa & adv);
		speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
			 ? 1 : 0);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
		printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
		       netdev->name,
		       speed ? "100" : "10",
		       duplex ? "full" : "half",
		       lpa);

		ncfgr = macb_readl(macb, NCFGR);
		ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
		if (speed)
			ncfgr |= MACB_BIT(SPD);
		if (duplex)
			ncfgr |= MACB_BIT(FD);
		macb_writel(macb, NCFGR, ncfgr);
		return 1;
	}
}

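/*
 * (Re)build the RX and TX descriptor rings, program the station
 * address and the MII/RMII pin mode, bring up the PHY, and finally
 * enable the receiver and transmitter.
 */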
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	u32 hwaddr_bottom;
	u16 hwaddr_top;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < CFG_MACB_RX_RING_SIZE; i++) {
		if (i == (CFG_MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	for (i = 0; i < CFG_MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (CFG_MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb->rx_tail = macb->tx_head = macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	/* set hardware address */
	hwaddr_bottom = cpu_to_le32(*((u32 *)netdev->enetaddr));
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = cpu_to_le16(*((u16 *)(netdev->enetaddr + 4)));
	macb_writel(macb, SA1T, hwaddr_top);

	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
	macb_writel(macb, USRIO, 0);
#else
	macb_writel(macb, USRIO, MACB_BIT(MII));
#endif

	if (!macb_phy_init(macb))
		return 0;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 1;
}

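/*
 * Stop the controller: halt transmission, wait for the frame currently
 * being transmitted (if any) to complete, then disable TX/RX and clear
 * the statistics registers.
 */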
static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

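/*
 * Allocate and register one MACB instance.  The receive buffer and
 * both descriptor rings come from coherent DMA memory, and the MDIO
 * clock divider is derived from the peripheral clock rate so that the
 * PHY is reachable before macb_init() runs.
 */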
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	unsigned long macb_hz;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(CFG_MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(CFG_MACB_RX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(CFG_MACB_TX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->tx_ring_dma);

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	sprintf(netdev->name, "macb%d", id);
	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	macb_hz = get_macb_pclk_rate(id);
	if (macb_hz < 20000000)
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV64);

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

	return 0;
}

#endif

#if defined(CONFIG_CMD_MII)

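/*
 * miiphy hooks for the MII command support.  These follow the same
 * MDIO sequence as macb_mdio_read()/macb_mdio_write() above, but with
 * interrupts disabled while the management port is switched on or off.
 */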
int miiphy_read(unsigned char addr, unsigned char reg, unsigned short *value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;
	int iflag;

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(&macb, EMACB_MAN, frame);

	do {
		netstat = macb_readl(&macb, EMACB_NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(&macb, EMACB_MAN);
	*value = MACB_BFEXT(DATA, frame);

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	return 0;
}

int miiphy_write(unsigned char addr, unsigned char reg, unsigned short value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;
	int iflag;

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(&macb, EMACB_MAN, frame);

	do {
		netstat = macb_readl(&macb, EMACB_NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	return 0;
}

#endif

#endif	/* CONFIG_MACB */