/*
 * Copyright (C) 2006 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include "common.h"
#include "net.h"
#include "malloc.h"
#include "asm/errno.h"
#include "asm/io.h"
#include "asm/immap_qe.h"
#include "qe.h"
#include "uccf.h"
#include "uec.h"
#include "uec_phy.h"
#include "miiphy.h"

#ifdef CONFIG_UEC_ETH1
static uec_info_t eth1_uec_info = {
	.uf_info		= {
		.ucc_num	= CFG_UEC1_UCC_NUM,
		.rx_clock	= CFG_UEC1_RX_CLK,
		.tx_clock	= CFG_UEC1_TX_CLK,
		.eth_type	= CFG_UEC1_ETH_TYPE,
	},
#if (CFG_UEC1_ETH_TYPE == FAST_ETH)
	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
#else
	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
#endif
	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.tx_bd_ring_len		= 16,
	.rx_bd_ring_len		= 16,
	.phy_address		= CFG_UEC1_PHY_ADDR,
	.enet_interface		= CFG_UEC1_INTERFACE_MODE,
};
#endif
#ifdef CONFIG_UEC_ETH2
static uec_info_t eth2_uec_info = {
	.uf_info		= {
		.ucc_num	= CFG_UEC2_UCC_NUM,
		.rx_clock	= CFG_UEC2_RX_CLK,
		.tx_clock	= CFG_UEC2_TX_CLK,
		.eth_type	= CFG_UEC2_ETH_TYPE,
	},
#if (CFG_UEC2_ETH_TYPE == FAST_ETH)
	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
#else
	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
#endif
	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.tx_bd_ring_len		= 16,
	.rx_bd_ring_len		= 16,
	.phy_address		= CFG_UEC2_PHY_ADDR,
	.enet_interface		= CFG_UEC2_INTERFACE_MODE,
};
#endif
#ifdef CONFIG_UEC_ETH3
static uec_info_t eth3_uec_info = {
	.uf_info		= {
		.ucc_num	= CFG_UEC3_UCC_NUM,
		.rx_clock	= CFG_UEC3_RX_CLK,
		.tx_clock	= CFG_UEC3_TX_CLK,
		.eth_type	= CFG_UEC3_ETH_TYPE,
	},
#if (CFG_UEC3_ETH_TYPE == FAST_ETH)
	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
#else
	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
#endif
	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.tx_bd_ring_len		= 16,
	.rx_bd_ring_len		= 16,
	.phy_address		= CFG_UEC3_PHY_ADDR,
	.enet_interface		= CFG_UEC3_INTERFACE_MODE,
};
#endif
#ifdef CONFIG_UEC_ETH4
static uec_info_t eth4_uec_info = {
	.uf_info		= {
		.ucc_num	= CFG_UEC4_UCC_NUM,
		.rx_clock	= CFG_UEC4_RX_CLK,
		.tx_clock	= CFG_UEC4_TX_CLK,
		.eth_type	= CFG_UEC4_ETH_TYPE,
	},
#if (CFG_UEC4_ETH_TYPE == FAST_ETH)
	.num_threads_tx		= UEC_NUM_OF_THREADS_1,
	.num_threads_rx		= UEC_NUM_OF_THREADS_1,
#else
	.num_threads_tx		= UEC_NUM_OF_THREADS_4,
	.num_threads_rx		= UEC_NUM_OF_THREADS_4,
#endif
	.riscTx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx			= QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.tx_bd_ring_len		= 16,
	.rx_bd_ring_len		= 16,
	.phy_address		= CFG_UEC4_PHY_ADDR,
	.enet_interface		= CFG_UEC4_INTERFACE_MODE,
};
#endif

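/*
 * The CFG_UECn_* macros referenced above come from the board configuration
 * header.  A minimal sketch of what a board might provide, with illustrative
 * (hypothetical) values only -- the real settings are board specific:
 *
 *	#define CONFIG_UEC_ETH1
 *	#define CFG_UEC1_UCC_NUM	0
 *	#define CFG_UEC1_RX_CLK		QE_CLK_NONE
 *	#define CFG_UEC1_TX_CLK		QE_CLK9
 *	#define CFG_UEC1_ETH_TYPE	GIGA_ETH
 *	#define CFG_UEC1_PHY_ADDR	0
 *	#define CFG_UEC1_INTERFACE_MODE	ENET_1000_GMII
 */
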
#define MAXCONTROLLERS	(4)

static struct eth_device *devlist[MAXCONTROLLERS];

u16 phy_read (struct uec_mii_info *mii_info, u16 regnum);
void phy_write (struct uec_mii_info *mii_info, u16 regnum, u16 val);

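/*
 * uec_mac_enable()/uec_mac_disable() toggle the Tx/Rx enable bits in
 * MACCFG1 for the direction(s) selected by 'mode' (COMM_DIR_TX,
 * COMM_DIR_RX or both) and mirror the resulting state in the UEC
 * private struct.
 */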
static int uec_mac_enable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(uec_private_t *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

static int uec_graceful_stop_tx(uec_private_t *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the graceful stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for the command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(uec_private_t *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: Rx global parameter RAM not initialized\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing the command and checking the ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
			ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
				(u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_restart_tx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(uec_private_t *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
			(u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

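/*
 * uec_open() brings the controller up for the given direction(s): it
 * enables the MAC and the UCC fast engine, and restarts any Tx/Rx
 * threads that were previously stopped gracefully.  uec_stop() below is
 * its counterpart.
 */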
static int uec_open(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx) {
		uec_restart_tx(uec);
	}
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx) {
		uec_restart_rx(uec);
	}

	return 0;
}

static int uec_stop(uec_private_t *uec, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __FUNCTION__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __FUNCTION__);
		return -EINVAL;
	}
	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx) {
		uec_graceful_stop_tx(uec);
	}
	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx) {
		uec_graceful_stop_rx(uec);
	}

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static int uec_set_mac_duplex(uec_private_t *uec, int duplex)
{
	uec_t *uec_regs;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	return 0;
}

static int uec_set_mac_if_mode(uec_private_t *uec, enet_interface_e if_mode)
{
	enet_interface_e enet_if_mode;
	uec_info_t *uec_info;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (enet_if_mode) {
	case ENET_100_MII:
	case ENET_10_MII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		break;
	case ENET_1000_GMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		break;
	case ENET_1000_TBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_TBIM;
		break;
	case ENET_1000_RTBI:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= (UPSMR_RPM | UPSMR_TBIM);
		break;
	case ENET_1000_RGMII_RXID:
	case ENET_1000_RGMII_ID:
	case ENET_1000_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_100_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RPM;
		break;
	case ENET_10_RGMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_RPM | UPSMR_R10M);
		break;
	case ENET_100_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= UPSMR_RMM;
		break;
	case ENET_10_RMII:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		upsmr |= (UPSMR_R10M | UPSMR_RMM);
		break;
	default:
		return -EINVAL;
		break;
	}
	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

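/*
 * Program the MII management clock divider in MIIMCFG, then wait for the
 * management interface to report non-busy so that subsequent PHY register
 * accesses can be issued safely.
 */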
static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
	int timeout = 0x1000;
	u32 miimcfg = 0;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the bus is free */
	while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--);
	if (timeout <= 0) {
		printf("%s: The MII Bus is stuck!", __FUNCTION__);
		return -ETIMEDOUT;
	}

	return 0;
}

static int init_phy(struct eth_device *dev)
{
	uec_private_t *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (uec_private_t *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
		mii_info->speed = SPEED_1000;
	} else {
		mii_info->speed = SPEED_100;
	}

	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* get info for this PHY */
	curphy = uec_get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}

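/*
 * Called after a PHY status read: reflect the negotiated duplex and speed
 * in the MAC (for RGMII, switching the PHY/MAC interface mode between
 * 1000/100/10), and report link up/down transitions on the console.
 */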
static void adjust_link(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;
	uec_t *uec_regs;
	struct uec_mii_info *mii_info = uec->mii_info;

	extern void change_phy_interface_mode(struct eth_device *dev,
					 enet_interface_e mode);
	uec_regs = uec->uec_regs;

	if (mii_info->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			}
			uec->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != uec->oldspeed) {
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				switch (mii_info->speed) {
				case 1000:
					break;
				case 100:
					printf("switching to rgmii 100\n");
					/* change phy to rgmii 100 */
					change_phy_interface_mode(dev,
								ENET_100_RGMII);
					/* change the MAC interface mode */
					uec_set_mac_if_mode(uec, ENET_100_RGMII);
					break;
				case 10:
					printf("switching to rgmii 10\n");
					/* change phy to rgmii 10 */
					change_phy_interface_mode(dev,
								ENET_10_RGMII);
					/* change the MAC interface mode */
					uec_set_mac_if_mode(uec, ENET_10_RGMII);
					break;
				default:
					printf("%s: Ack, Speed (%d) is illegal\n",
						dev->name, mii_info->speed);
					break;
				}
			}

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;
		}

		if (!uec->oldlink) {
			printf("%s: Link is up\n", dev->name);
			uec->oldlink = 1;
		}

	} else { /* if (mii_info->link) */
		if (uec->oldlink) {
			printf("%s: Link is down\n", dev->name);
			uec->oldlink = 0;
			uec->oldspeed = 0;
			uec->oldduplex = -1;
		}
	}
}

static void phy_change(struct eth_device *dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;

	/* Update the link, speed, duplex */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

	/* Adjust the interface according to speed */
	adjust_link(dev);
}

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
		&& !defined(BITBANGMII)

/*
 * Read a MII PHY register.
 *
 * Returns:
 *  0 on success
 */
static int uec_miiphy_read(char *devname, unsigned char addr,
			    unsigned char reg, unsigned short *value)
{
	*value = uec_read_phy_reg(devlist[0], addr, reg);

	return 0;
}

/*
 * Write a MII PHY register.
 *
 * Returns:
 *  0 on success
 */
static int uec_miiphy_write(char *devname, unsigned char addr,
			     unsigned char reg, unsigned short value)
{
	uec_write_phy_reg(devlist[0], addr, reg, value);

	return 0;
}

#endif

static int uec_set_mac_address(uec_private_t *uec, u8 *mac_addr)
{
	uec_t *uec_regs;
	u32 mac_addr1;
	u32 mac_addr2;

	if (!uec) {
		printf("%s: uec not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;

	/* For a station address of 0x12345678ABCD, write
	   0xCDAB7856 to MACSTNADDR1 and
	   0x34120000 to MACSTNADDR2 */

	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) | \
			(mac_addr[3] << 8)  | (mac_addr[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int uec_convert_threads_num(uec_num_of_threads_e threads_num,
				 int *threads_num_ret)
{
	int num_threads_numerica;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerica = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerica = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerica = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerica = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerica = 8;
		break;
	default:
		printf("%s: Bad number of threads value.",
			 __FUNCTION__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerica;

	return 0;
}

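/*
 * Build the Tx side of the QE parameter RAM: allocate the global Tx
 * parameter page, the send queue descriptor and the per-thread data areas
 * in MURAM, and point the send queue at the TxBD ring set up by
 * uec_startup().
 */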
static void uec_init_tx_parameter(uec_private_t *uec, int num_threads_tx)
{
	uec_info_t *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Allocate global Tx parameter RAM page */
	uec->tx_glbl_pram_offset = qe_muram_alloc(
				sizeof(uec_tx_global_pram_t),
				UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (uec_tx_global_pram_t *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(uec_tx_global_pram_t));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset = qe_muram_alloc(
				sizeof(uec_send_queue_qd_t),
				UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (uec_send_queue_mem_region_t *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Set up the table with the TxBD ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
						 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
				 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
				 end_bd);

	/* Scheduler Base Pointer: we have only one Tx queue, so not needed */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer: TxRMON disabled, so not needed */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++) {
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);
	}

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++) {
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);
	}

	/* TQPTR */
	uec->thread_dat_tx_offset = qe_muram_alloc(
		num_threads_tx * sizeof(uec_thread_data_tx_t) +
		32 * (num_threads_tx == 1), UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (uec_thread_data_tx_t *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

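/*
 * Build the Rx side of the QE parameter RAM: global Rx parameter page,
 * per-thread data, the BD queue table and frame-length limits, plus a
 * cleared PQ2-style address filtering hash table.
 */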
static void uec_init_rx_parameter(uec_private_t *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	uec_82xx_address_filtering_pram_t *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset = qe_muram_alloc(
		sizeof(uec_rx_global_pram_t), UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (uec_rx_global_pram_t *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero the global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(uec_rx_global_pram_t));

	/* Init global Rx parameter RAM */
	/* REMODER: extended feature mode disable, VLAN disable,
	   lossless flow control disable, receive firmware statistics disable,
	   extended address parsing mode disable, one Rx queue,
	   dynamic maximum/minimum frame length disable, IP checksum check
	   disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset = qe_muram_alloc(
			num_threads_rx * sizeof(uec_thread_data_rx_t),
			UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (uec_thread_data_rx_t *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset = qe_muram_alloc(
				sizeof(uec_rx_bd_queues_entry_t) + \
				sizeof(uec_rx_prefetched_bds_t),
				UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (uec_rx_bd_queues_entry_t *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(uec_rx_bd_queues_entry_t) + \
			sizeof(uec_rx_prefetched_bds_t));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++) {
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);
	}

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (uec_82xx_address_filtering_pram_t *) \
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

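/*
 * Fill in the INIT ENET command parameter block (magic values, thread
 * counts, per-thread snums and parameter RAM pointers) and issue the
 * QE_INIT_TX_RX host command to the CECR for this UCC.
 */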
static int uec_issue_init_enet_rxtx_cmd(uec_private_t *uec,
					 int thread_tx, int thread_rx)
{
	uec_init_cmd_pram_t *p_init_enet_param;
	u32 init_enet_param_offset;
	uec_info_t *uec_info;
	int i;
	int snum;
	u32 init_enet_offset;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;

	/* Allocate the init enet command parameter block */
	uec->init_enet_param_offset = qe_muram_alloc(
					sizeof(uec_init_cmd_pram_t), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (uec_init_cmd_pram_t *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero the init enet command struct */
	memset((void *)uec->p_init_enet_param, 0, sizeof(uec_init_cmd_pram_t));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					 << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					 << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
						 (u32)uec_info->riscRx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s: cannot get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		if (i == 0) {
			init_enet_offset = 0;
		} else {
			init_enet_offset = qe_muram_alloc(
					sizeof(uec_thread_rx_pram_t),
					UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->riscRx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
					 (u32)uec_info->riscTx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		if ((snum = qe_get_snum()) < 0) {
			printf("%s: cannot get snum\n", __FUNCTION__);
			return -ENOMEM;
		}

		init_enet_offset = qe_muram_alloc(sizeof(uec_thread_tx_pram_t),
						 UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
				 init_enet_offset | (u32)uec_info->riscTx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(
				uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
						 init_enet_param_offset);

	return 0;
}

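/*
 * One-time hardware bring-up for a UEC: sanity-check the BD ring geometry,
 * init the UCC fast controller, program MACCFG1/2, UPSMR and UTBIPAR,
 * allocate and link the Tx/Rx BD rings and Rx buffers, set up the Tx/Rx
 * parameter RAM and finally issue the INIT ENET Rx/Tx command.
 */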
static int uec_startup(uec_private_t *uec)
{
	uec_info_t *uec_info;
	ucc_fast_info_t *uf_info;
	ucc_fast_private_t *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	enet_interface_e enet_interface;
	u32 length;
	u32 align;
	qe_bd_t *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initialized\n", __FUNCTION__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &(uec_info->uf_info);

	/* Check if the Rx BD ring len is illegal */
	if ((uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN) || \
		(uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be a multiple of 4 and at least 8.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check if the Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if ((MAX_RXBUF_LEN == 0) || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be a multiple of 128.\n",
			 __FUNCTION__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __FUNCTION__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
					 &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
					 &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to the UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup MAC interface mode */
	uec_set_mac_if_mode(uec, uec_info->enet_interface);

	/* Setup MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *) CONFIG_MIIM_ADDRESS;
#endif

	/* Setup MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
	enet_interface = uec->uec_info->enet_interface;
	if (enet_interface == ENET_1000_TBI ||
		 enet_interface == ENET_1000_RTBI) {
		utbipar |= (uec_info->phy_address + uec_info->uf_info.ucc_num)
						 << UTBIPAR_PHY_ADDRESS_SHIFT;
	} else {
		utbipar |= (0x10 + uec_info->uf_info.ucc_num)
						 << UTBIPAR_PHY_ADDRESS_SHIFT;
	}

	out_be32(&uec_regs->utbipar, utbipar);

	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
						 & ~(align - 1));
	}

	/* Zero all of the Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
						 & ~(align - 1));
	}

	/* Zero all of the Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate the Rx buffers */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
						 & ~(align - 1));
	}

	/* Zero all of the Rx buffers */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring */
	bd = (qe_bd_t *)uec->p_tx_bd_ring;
	uec->txBd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TxBD_WRAP);

	/* Init RxBD ring */
	bd = (qe_bd_t *)uec->p_rx_bd_ring;
	uec->rxBd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RxBD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RxBD_WRAP | RxBD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s: issue init enet cmd failed\n", __FUNCTION__);
		return -ENOMEM;
	}

	return 0;
}

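/*
 * eth_device init hook: on the first run initialize the PHY, start
 * auto-negotiation and wait up to ~5 seconds for a link; then program the
 * MAC station address and open the controller for Rx and Tx.
 */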
static int uec_init(struct eth_device* dev, bd_t *bd)
{
	uec_private_t *uec;
	int err, i;
	struct phy_info *curphy;

	uec = (uec_private_t *)dev->priv;

	if (uec->the_first_run == 0) {
		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
				 dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give PHYs up to 5 sec to report a link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			udelay(100000);
		} while (((i-- > 0) && !uec->mii_info->link) || err);

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		uec->the_first_run = 1;
	}

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n",
			 __FUNCTION__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	phy_change(dev);

	return (uec->mii_info->link ? 0 : -1);
}

static void uec_halt(struct eth_device* dev)
{
	uec_private_t *uec = (uec_private_t *)dev->priv;
	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

static int uec_send(struct eth_device* dev, volatile void *buf, int len)
{
	uec_private_t *uec;
	ucc_fast_private_t *uccf;
	volatile qe_bd_t *bd;
	u16 status;
	int i;
	int result = 0;

	uec = (uec_private_t *)dev->priv;
	uccf = uec->uccf;
	bd = uec->txBd;

	/* Find an empty TxBD */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = bd->status;
	status &= BD_WRAP;
	status |= (TxBD_READY | TxBD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell the UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for the buffer to be transmitted */
	for (i = 0; bd->status & TxBD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->txBd = bd;
	result = 1;

	return result;
}

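/*
 * Poll the RxBD ring: hand every completed, error-free frame to the
 * network stack via NetReceive(), then recycle the descriptor by marking
 * it empty again.
 */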
static int uec_recv(struct eth_device* dev)
{
	uec_private_t *uec = dev->priv;
	volatile qe_bd_t *bd;
	u16 status;
	u16 len;
	u8 *data;

	bd = uec->rxBd;
	status = bd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RxBD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = bd->status;
	}
	uec->rxBd = bd;

	return 1;
}

int uec_initialize(int index)
{
	struct eth_device *dev;
	int i;
	uec_private_t *uec;
	uec_info_t *uec_info;
	int err;

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* Allocate the UEC private struct */
	uec = (uec_private_t *)malloc(sizeof(uec_private_t));
	if (!uec) {
		return -ENOMEM;
	}
	memset(uec, 0, sizeof(uec_private_t));

	/* Init the UEC info struct; the settings come from the board config */
	uec_info = NULL;
	if (index == 0) {
#ifdef CONFIG_UEC_ETH1
		uec_info = &eth1_uec_info;
#endif
	} else if (index == 1) {
#ifdef CONFIG_UEC_ETH2
		uec_info = &eth2_uec_info;
#endif
	} else if (index == 2) {
#ifdef CONFIG_UEC_ETH3
		uec_info = &eth3_uec_info;
#endif
	} else if (index == 3) {
#ifdef CONFIG_UEC_ETH4
		uec_info = &eth4_uec_info;
#endif
	} else {
		printf("%s: index is illegal.\n", __FUNCTION__);
		return -EINVAL;
	}

	devlist[index] = dev;

	uec->uec_info = uec_info;

	sprintf(dev->name, "FSL UEC%d", index);
	dev->iobase = 0;
	dev->priv = (void *)uec;
	dev->init = uec_init;
	dev->halt = uec_halt;
	dev->send = uec_send;
	dev->recv = uec_recv;

	/* Clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	eth_register(dev);

	err = uec_startup(uec);
	if (err) {
		printf("%s: Cannot configure net device, aborting.", dev->name);
		return err;
	}

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) \
		&& !defined(BITBANGMII)
	miiphy_register(dev->name, uec_miiphy_read, uec_miiphy_write);
#endif

	return 1;
}
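
/*
 * A board would typically call uec_initialize() once per enabled UEC from
 * its Ethernet init path.  A minimal sketch of such a caller (hypothetical
 * wrapper, not part of this file):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *	#ifdef CONFIG_UEC_ETH1
 *		uec_initialize(0);
 *	#endif
 *	#ifdef CONFIG_UEC_ETH2
 *		uec_initialize(1);
 *	#endif
 *		return 0;
 *	}
 */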