// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2006-2011 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 */

#include <common.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <linux/immap_qe.h>
#include "uccf.h"
#include "uec.h"
#include "uec_phy.h"
#include "miiphy.h"
#include <fsl_qe.h>
#include <phy.h>

/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

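/*
 * Per-controller configuration table: one STD_UEC_INFO() entry is compiled
 * in for every controller enabled through CONFIG_UEC_ETHn.  The whole table
 * is handed to uec_eth_init() by uec_standard_init() at the end of this file.
 */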
static struct uec_inf uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};

#define MAXCONTROLLERS	(8)

static struct eth_device *devlist[MAXCONTROLLERS];

static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

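/*
 * Graceful stop: the QE is asked to finish the frame currently in flight
 * before the channel is disabled.  For Tx the GRA event in UCCE is cleared,
 * the QE_GRACEFUL_STOP_TX host command is issued, and the code then busy
 * waits (without a timeout) until the QE raises UCCE_GRA again.
 */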
static int uec_graceful_stop_tx(struct uec_priv *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(struct uec_priv *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: Rx global parameter RAM not initialized\n",
		       __func__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
			ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_restart_tx(struct uec_priv *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(struct uec_priv *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
	struct ucc_fast_priv *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
		uec_restart_tx(uec);
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
		uec_restart_rx(uec);

	return 0;
}

static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}
	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
		uec_graceful_stop_tx(uec);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
		uec_graceful_stop_rx(uec);

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static int uec_set_mac_duplex(struct uec_priv *uec, int duplex)
{
	uec_t *uec_regs;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	return 0;
}

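/*
 * Program MACCFG2 and UPSMR for a given interface/speed pair: MACCFG2
 * selects nibble mode for 10/100 Mbps and byte mode for 1000 Mbps, and
 * UPSMR gets RPM set for RGMII/RTBI, R10M at 10 Mbps, RMM for RMII,
 * TBIM for TBI/RTBI and SGMM for SGMII.  Unsupported combinations are
 * rejected with -EINVAL.
 */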
static int uec_set_mac_if_mode(struct uec_priv *uec,
			       phy_interface_t if_mode, int speed)
{
	phy_interface_t enet_if_mode;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (speed) {
	case SPEED_10:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= (UPSMR_RPM | UPSMR_R10M);
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= (UPSMR_R10M | UPSMR_RMM);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_100:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= UPSMR_RMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_1000:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_GMII:
			break;
		case PHY_INTERFACE_MODE_TBI:
			upsmr |= UPSMR_TBIM;
			break;
		case PHY_INTERFACE_MODE_RTBI:
			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
			break;
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			upsmr |= UPSMR_SGMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
	int timeout = 0x1000;
	u32 miimcfg = 0;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the bus is free */
	while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--)
		;
	if (timeout <= 0) {
		printf("%s: The MII Bus is stuck!\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

static int init_phy(struct eth_device *dev)
{
	struct uec_priv *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (struct uec_priv *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info\n", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	if (uec->uec_info->uf_info.eth_type == GIGA_ETH)
		mii_info->speed = SPEED_1000;
	else
		mii_info->speed = SPEED_100;

	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!\n", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* get info for this PHY */
	curphy = uec_get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found\n", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}

static void adjust_link(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;
	struct uec_mii_info *mii_info = uec->mii_info;

	if (mii_info->link) {
		/*
		 * Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			}
			uec->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != uec->oldspeed) {
			phy_interface_t mode =
				uec->uec_info->enet_interface_type;
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				switch (mii_info->speed) {
				case SPEED_1000:
					break;
				case SPEED_100:
					printf("switching to rgmii 100\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					break;
				case SPEED_10:
					printf("switching to rgmii 10\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					break;
				default:
					printf("%s: Ack, speed (%d) is illegal\n",
					       dev->name, mii_info->speed);
					break;
				}
			}

			/* change phy */
			change_phy_interface_mode(dev, mode, mii_info->speed);
			/* change the MAC interface mode */
			uec_set_mac_if_mode(uec, mode, mii_info->speed);

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;
		}

		if (!uec->oldlink) {
			printf("%s: Link is up\n", dev->name);
			uec->oldlink = 1;
		}

	} else { /* if (mii_info->link) */
		if (uec->oldlink) {
			printf("%s: Link is down\n", dev->name);
			uec->oldlink = 0;
			uec->oldspeed = 0;
			uec->oldduplex = -1;
		}
	}
}

static void phy_change(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* QE9 and QE12 need to be set for enabling QE MII management signals */
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Update the link, speed, duplex */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	/*
	 * QE12 is muxed with LBCTL, it needs to be released for enabling
	 * LBCTL signal for LBC usage.
	 */
	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Adjust the interface according to speed */
	adjust_link(dev);
}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)

/*
 * Find a device index from the devlist by name
 *
 * Returns:
 *  The index where the device is located, -1 on error
 */
static int uec_miiphy_find_dev_by_name(const char *devname)
{
	int i;

	for (i = 0; i < MAXCONTROLLERS; i++) {
		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0)
			break;
	}

	/* If device cannot be found, returns -1 */
	if (i == MAXCONTROLLERS) {
		debug("%s: device %s not found in devlist\n", __func__,
		      devname);
		i = -1;
	}

	return i;
}

/*
 * Read a MII PHY register.
 *
 * Returns:
 *  The value read from the PHY register, or 0 on error
 */
static int uec_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	unsigned short value = 0;
	int devindex = 0;

	if (!bus->name) {
		debug("%s: NULL pointer given\n", __func__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(bus->name);
		if (devindex >= 0)
			value = uec_read_phy_reg(devlist[devindex], addr, reg);
	}
	return value;
}

/*
 * Write a MII PHY register.
 *
 * Returns:
 *  0 on success
 */
static int uec_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
			    u16 value)
{
	int devindex = 0;

	if (!bus->name) {
		debug("%s: NULL pointer given\n", __func__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(bus->name);
		if (devindex >= 0)
			uec_write_phy_reg(devlist[devindex], addr, reg, value);
	}
	return 0;
}
#endif

static int uec_set_mac_address(struct uec_priv *uec, u8 *mac_addr)
{
	uec_t *uec_regs;
	u32 mac_addr1;
	u32 mac_addr2;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;

	/*
	 * For a station address of 0x12345678ABCD, write
	 * 0xCDAB7856 to MACSTNADDR1 and
	 * 0x34120000 to MACSTNADDR2
	 */

	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) |
			(mac_addr[3] << 8) | (mac_addr[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
				   int *threads_num_ret)
{
	int num_threads_numerica;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerica = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerica = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerica = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerica = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerica = 8;
		break;
	default:
		printf("%s: Bad number of threads value.",
		       __func__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerica;

	return 0;
}

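/*
 * Build the Tx side of the QE parameter RAM: a global Tx parameter page,
 * a single send-queue descriptor pointing at the TxBD ring allocated in
 * uec_startup(), and the per-thread data area.  All of these live in
 * MURAM obtained through qe_muram_alloc(); only one Tx queue is used, so
 * the scheduler and TxRMON base pointers are left at zero.
 */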
static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
	struct uec_inf *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_tx_global_pram),
			       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset =
		qe_muram_alloc(sizeof(struct uec_send_queue_qd),
			       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Setup the table with TxBDs ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
		 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 end_bd);

	/* Scheduler Base Pointer, we have only one Tx queue, not needed */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

	/* TQPTR */
	uec->thread_dat_tx_offset =
		qe_muram_alloc(num_threads_tx *
			       sizeof(struct uec_thread_data_tx) +
			       32 * (num_threads_tx == 1),
			       UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (struct uec_thread_data_tx *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

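/*
 * Build the Rx side of the QE parameter RAM: the global Rx parameter page,
 * per-thread data, and the RxBD queue table that points at the RxBD ring
 * set up in uec_startup().  Frame-length limits (MRBLR, MFLR, MINFLR,
 * MAXD1/2) are programmed here and the PQ2-style address filtering hash
 * table is cleared.
 */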
static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	struct uec_82xx_add_filtering_pram *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_rx_global_pram),
			       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero Global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

	/* Init global Rx parameter RAM */
	/*
	 * REMODER, Extended feature mode disable, VLAN disable,
	 * LossLess flow control disable, Receive firmware statistic disable,
	 * Extended address parsing mode disable, one Rx queue,
	 * Dynamic maximum/minimum frame length disable, IP checksum check
	 * disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset =
		qe_muram_alloc(num_threads_rx *
			       sizeof(struct uec_thread_data_rx),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (struct uec_thread_data_rx *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset =
		qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
			       sizeof(struct uec_rx_pref_bds),
			       UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
	       sizeof(struct uec_rx_pref_bds));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++)
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (struct uec_82xx_add_filtering_pram *)
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

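/*
 * Assemble the INIT_TX_RX command parameter page and hand it to the QE.
 * Each Rx/Tx thread entry combines a serial number from qe_get_snum(),
 * the MURAM offset of its thread parameter RAM and the RISC allocation,
 * after which QE_INIT_TX_RX is issued through qe_issue_cmd().
 */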
static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
					int thread_tx, int thread_rx)
{
	struct uec_init_cmd_pram *p_init_enet_param;
	u32 init_enet_param_offset;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	int i;
	int snum;
	u32 off;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset =
		qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (struct uec_init_cmd_pram *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0,
	       sizeof(struct uec_init_cmd_pram));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					     << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					     << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
					     (u32)uec_info->risc_rx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s cannot get snum\n", __func__);
			return -ENOMEM;
		}

		if (i == 0) {
			off = 0;
		} else {
			off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
					     UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
				      (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s cannot get snum\n", __func__);
			return -ENOMEM;
		}

		off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
				     UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
		     init_enet_param_offset);

	return 0;
}

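/*
 * One-time controller bring-up: validate the BD ring/buffer geometry,
 * initialise the UCC fast block, program the MAC and MII/TBI interface,
 * allocate and link the Tx/Rx BD rings and Rx buffers, fill in the QE
 * parameter RAM and finally issue the INIT_TX_RX command.  Called once
 * per controller from uec_initialize().
 */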
static int uec_startup(struct uec_priv *uec)
{
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	struct ucc_fast_priv *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	u32 length;
	u32 align;
	struct buffer_descriptor *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initialized\n", __func__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Check if Rx BD ring len is illegal */
	if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
	    (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if (MAX_RXBUF_LEN == 0 || MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT) {
		printf("%s: max rx buffer length must be multiple of 128.\n",
		       __func__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __func__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
				    &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
				    &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup MAC interface mode */
	uec_set_mac_if_mode(uec, uec_info->enet_interface_type,
			    uec_info->speed);

	/* Setup MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *)CONFIG_MIIM_ADDRESS;
#endif

	/* Setup MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/* Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
	 * This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/* Configure the TBI for SGMII operation */
	if (uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII &&
	    uec->uec_info->speed == SPEED_1000) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_CR, TBICR_SETTINGS);
	}

	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		  UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
	    UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
					   & ~(align - 1));
	}

	/* Zero all of Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
					   & ~(align - 1));
	}

	/* Zero all of Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffer */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
				       & ~(align - 1));
	}

	/* Zero all of the Rx buffer */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring */
	bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
	uec->tx_bd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TX_BD_WRAP);

	/* Init RxBD ring */
	bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
	uec->rx_bd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RX_BD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __func__);
		return -ENOMEM;
	}

	return 0;
}

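/*
 * eth_device .init hook: on the first call the PHY is initialised and
 * auto-negotiation is given up to five seconds to report a link; every
 * call then programs the station address, enables Rx/Tx via uec_open()
 * and re-checks the link through phy_change().
 */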
static int uec_init(struct eth_device *dev, struct bd_info *bd)
{
	struct uec_priv *uec;
	int err, i;
	struct phy_info *curphy;
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	uec = (struct uec_priv *)dev->priv;

	if (!uec->the_first_run) {
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/*
		 * QE9 and QE12 need to be set for enabling QE MII
		 * management signals
		 */
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
			       dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give PHYs up to 5 sec to report a link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			if (!(((i-- > 0) && !uec->mii_info->link) || err))
				break;
			mdelay(100);
		} while (1);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/* QE12 needs to be released for enabling LBCTL signal */
		clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		adjust_link(dev);
		uec->the_first_run = 1;
	}

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n",
		       __func__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	phy_change(dev);

	return uec->mii_info->link ? 0 : -1;
}

static void uec_halt(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

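/*
 * Transmit one frame by polling the BD ring: wait for the current TxBD to
 * become free, attach the buffer, mark the BD ready/last, kick the UCC
 * with ucc_fast_transmit_on_demand() and spin until the QE clears the
 * ready bit again.
 */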
static int uec_send(struct eth_device *dev, void *buf, int len)
{
	struct uec_priv *uec;
	struct ucc_fast_priv *uccf;
	struct buffer_descriptor *bd;
	u16 status;
	int i;
	int result = 0;

	uec = (struct uec_priv *)dev->priv;
	uccf = uec->uccf;
	bd = uec->tx_bd;

	/* Find an empty TxBD */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = BD_STATUS(bd);
	status &= BD_WRAP;
	status |= (TX_BD_READY | TX_BD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for buffer to be transmitted */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->tx_bd = bd;
	result = 1;

	return result;
}

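/*
 * Receive path: walk the RxBD ring while descriptors are not empty, pass
 * good frames to net_process_received_packet(), then return each BD to
 * the QE by clearing its status and setting RX_BD_EMPTY again.
 */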
static int uec_recv(struct eth_device *dev)
{
	struct uec_priv *uec = dev->priv;
	struct buffer_descriptor *bd;
	u16 status;
	u16 len;
	u8 *data;

	bd = uec->rx_bd;
	status = BD_STATUS(bd);

	while (!(status & RX_BD_EMPTY)) {
		if (!(status & RX_BD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			net_process_received_packet(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RX_BD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = BD_STATUS(bd);
	}
	uec->rx_bd = bd;

	return 1;
}

int uec_initialize(struct bd_info *bis, struct uec_inf *uec_info)
{
	struct eth_device *dev;
	int i;
	struct uec_priv *uec;
	int err;

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* Allocate the UEC private struct */
	uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
	if (!uec)
		return -ENOMEM;

	memset(uec, 0, sizeof(struct uec_priv));

	/* Adjust uec_info */
#if (MAX_QE_RISC == 4)
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif

	devlist[uec_info->uf_info.ucc_num] = dev;

	uec->uec_info = uec_info;
	uec->dev = dev;

	sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);
	dev->iobase = 0;
	dev->priv = (void *)uec;
	dev->init = uec_init;
	dev->halt = uec_halt;
	dev->send = uec_send;
	dev->recv = uec_recv;

	/* Clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	eth_register(dev);

	err = uec_startup(uec);
	if (err) {
		printf("%s: Cannot configure net device, aborting.\n",
		       dev->name);
		return err;
	}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
	int retval;
	struct mii_dev *mdiodev = mdio_alloc();

	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = uec_miiphy_read;
	mdiodev->write = uec_miiphy_write;

	retval = mdio_register(mdiodev);
	if (retval < 0)
		return retval;
#endif

	return 1;
}

int uec_eth_init(struct bd_info *bis, struct uec_inf *uecs, int num)
{
	int i;

	for (i = 0; i < num; i++)
		uec_initialize(bis, &uecs[i]);

	return 0;
}

int uec_standard_init(struct bd_info *bis)
{
	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
}