/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <common.h>

#if defined(CONFIG_MACB) \
	&& (defined(CONFIG_CMD_NET) || defined(CONFIG_CMD_MII))

/*
 * The u-boot networking stack is a little weird. It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use. We'll
 * allocate our own, but we still need one buffer from the core to
 * copy a packet into when it wraps around the end of the DMA ring.
 *
 * Therefore, define CFG_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header. This way, the core allocates one RX buffer
 * and one TX buffer, each of which is large enough to hold an
 * ethernet packet of maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding"). MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#include <malloc.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>

#include "macb.h"

#define barrier() asm volatile("" ::: "memory")

#define CFG_MACB_RX_BUFFER_SIZE		4096
#define CFG_MACB_RX_RING_SIZE		(CFG_MACB_RX_BUFFER_SIZE / 128)
#define CFG_MACB_TX_RING_SIZE		16
#define CFG_MACB_TX_TIMEOUT		1000
#define CFG_MACB_AUTONEG_TIMEOUT	5000000

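/*
 * Each ring entry is a two-word descriptor shared with the MACB DMA
 * engine. For receive descriptors, 'addr' carries the buffer address
 * together with the RXADDR_USED/RXADDR_WRAP bits and 'ctrl' carries
 * the received frame status. For transmit descriptors, 'addr' is the
 * buffer address and 'ctrl' carries the frame length, control flags
 * and completion status (the TXBUF_* bits below).
 */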
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

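/*
 * PHY access goes through the MACB's PHY maintenance (MAN) register.
 * The frame written below is a standard clause-22 management frame:
 * start-of-frame, read/write opcode, PHY address, register address,
 * turnaround code and 16 bits of data. Completion is signalled by the
 * IDLE bit in NSR.
 */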
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

#if defined(CONFIG_CMD_NET)

static int macb_send(struct eth_device *netdev, volatile void *packet,
		     int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

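	/*
	 * Fill in the descriptor: frame length, end-of-frame marker, and
	 * the WRAP bit when handing out the last entry in the ring.
	 */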
	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (CFG_MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else
		macb->tx_head++;

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= CFG_MACB_TX_TIMEOUT; i++) {
		barrier();
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= CFG_MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares anyway */
	return 0;
}

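/*
 * Give the descriptors between the old and the new tail back to the
 * controller by clearing their USED bits; the controller will not
 * reuse a receive buffer until its USED bit has been cleared.
 */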
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i >= CFG_MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb->rx_tail = new_tail;
}

static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (CFG_MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)NetRxPackets[0],
				       buffer, headlen);
				memcpy((void *)NetRxPackets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)NetRxPackets[0];
			}

			NetReceive(buffer, length);
			if (++rx_tail >= CFG_MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= CFG_MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}

static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < CFG_MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < CFG_MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	} else {
		adv = macb_mdio_read(macb, MII_ADVERTISE);
		lpa = macb_mdio_read(macb, MII_LPA);
		media = mii_nway_result(lpa & adv);
		speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
			 ? 1 : 0);
		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
		printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
		       netdev->name,
		       speed ? "100" : "10",
		       duplex ? "full" : "half",
		       lpa);

		ncfgr = macb_readl(macb, NCFGR);
		ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
		if (speed)
			ncfgr |= MACB_BIT(SPD);
		if (duplex)
			ncfgr |= MACB_BIT(FD);
		macb_writel(macb, NCFGR, ncfgr);
		return 1;
	}
}

static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	u32 hwaddr_bottom;
	u16 hwaddr_top;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < CFG_MACB_RX_RING_SIZE; i++) {
		if (i == (CFG_MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	for (i = 0; i < CFG_MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (CFG_MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb->rx_tail = macb->tx_head = macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	/* set hardware address */
	hwaddr_bottom = cpu_to_le32(*((u32 *)netdev->enetaddr));
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = cpu_to_le16(*((u16 *)(netdev->enetaddr + 4)));
	macb_writel(macb, SA1T, hwaddr_top);

	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91CAP9ADK
	macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91CAP9ADK
	macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */

	if (!macb_phy_init(macb))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	unsigned long macb_hz;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(CFG_MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(CFG_MACB_RX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(CFG_MACB_TX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->tx_ring_dma);

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	sprintf(netdev->name, "macb%d", id);
	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	macb_hz = get_macb_pclk_rate(id);
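	/*
	 * The CLK field of NCFGR divides the MACB peripheral clock down
	 * to generate MDC; pick the divider so that the resulting MDC
	 * frequency stays within the 2.5 MHz limit the IEEE 802.3
	 * management interface expects.
	 */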
	if (macb_hz < 20000000)
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		ncfgr = MACB_BF(CLK, MACB_CLK_DIV64);

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

	return 0;
}

#endif

#if defined(CONFIG_CMD_MII)

int miiphy_read(unsigned char addr, unsigned char reg, unsigned short *value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;
	int iflag;

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(&macb, EMACB_MAN, frame);

	do {
		netstat = macb_readl(&macb, EMACB_NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(&macb, EMACB_MAN);
	*value = MACB_BFEXT(DATA, frame);

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	return 0;
}

int miiphy_write(unsigned char addr, unsigned char reg, unsigned short value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;
	int iflag;

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(&macb, EMACB_MAN, frame);

	do {
		netstat = macb_readl(&macb, EMACB_NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	iflag = disable_interrupts();
	netctl = macb_readl(&macb, EMACB_NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(&macb, EMACB_NCR, netctl);
	if (iflag)
		enable_interrupts();

	return 0;
}

#endif

#endif /* CONFIG_MACB */