// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 * Author: Mark Lee <mark-mc.lee@mediatek.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/mdio.h>
#include <linux/mii.h>

#include "mtk_eth.h"

#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

#define MT753X_NUM_PHYS		5
#define MT753X_NUM_PORTS	7
#define MT753X_DFL_SMI_ADDR	31
#define MT753X_SMI_ADDR_MASK	0x1f

#define MT753X_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))

enum mtk_switch {
	SW_NONE,
	SW_MT7530,
	SW_MT7531
};

/* struct mtk_soc_data - This is the structure holding all differences
 * among various platforms
 * @caps:		Flags showing the extra capabilities of the SoC
 * @ana_rgc3:		The offset of register ANA_RGC3 relative to the
 *			sgmiisys syscon
 * @gdma_count:		Number of GDMAs
 * @pdma_base:		Register base of the PDMA block
 * @txd_size:		Tx DMA descriptor size
 * @rxd_size:		Rx DMA descriptor size
 */
struct mtk_soc_data {
	u32 caps;
	u32 ana_rgc3;
	u32 gdma_count;
	u32 pdma_base;
	u32 txd_size;
	u32 rxd_size;
};

struct mtk_eth_priv {
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	void *tx_ring_noc;
	void *rx_ring_noc;

	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *sgmii_base;

	struct regmap *ethsys_regmap;

	struct regmap *infra_regmap;

	struct regmap *usxgmii_regmap;
	struct regmap *xfi_pextp_regmap;
	struct regmap *xfi_pll_regmap;
	struct regmap *toprgu_regmap;

	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	const struct mtk_soc_data *soc;
	int gmac_id;
	int force_mode;
	int speed;
	int duplex;
	bool pn_swap;

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	void (*switch_mac_control)(struct mtk_eth_priv *priv, bool enable);
	u32 mt753x_smi_addr;
	u32 mt753x_phy_base;
	u32 mt753x_pmcr;
	u32 mt753x_reset_wait_time;

	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};

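/* Register access helpers for the PDMA/GDMA/GMAC blocks and the ethsys and
 * infracfg syscons
 */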
static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + priv->soc->pdma_base + reg);
}

static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
}

static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
			   u32 val)
{
	u32 gdma_base;

	if (no == 2)
		gdma_base = GDMA3_BASE;
	else if (no == 1)
		gdma_base = GDMA2_BASE;
	else
		gdma_base = GDMA1_BASE;

	writel(val, priv->fe_base + gdma_base + reg);
}

static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}

static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	uint val;

	regmap_read(priv->ethsys_regmap, reg, &val);
	val &= ~clr;
	val |= set;
	regmap_write(priv->ethsys_regmap, reg, val);
}

static void mtk_infra_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			  u32 set)
{
	uint val;

	regmap_read(priv->infra_regmap, reg, &val);
	val &= ~clr;
	val |= set;
	regmap_write(priv->infra_regmap, reg, val);
}

/* Direct MDIO clause 22/45 access via SoC */
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}

/* Direct MDIO clause 22 read via SoC */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}

/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}

/* Indirect MDIO clause 45 read via MII registers */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/* Indirect MDIO clause 45 write via MII registers */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}

/*
 * MT7530 Internal Register Address Bits
 * -------------------------------------------------------------------
 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
 * |----------------------------------------|---------------|--------|
 * |              Page Address              |  Reg Address  | Unused |
 * -------------------------------------------------------------------
 */

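/*
 * For example, accessing switch register 0x7ffc means writing page address
 * 0x1ff (0x7ffc >> 6) to MII register 0x1f, then accessing the low word at
 * MII register 0xf ((0x7ffc >> 2) & 0xf) and the high word at MII register
 * 0x10, as done below.
 */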
static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}

static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
}

static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	u32 val;

	mt753x_reg_read(priv, reg, &val);
	val &= ~clr;
	val |= set;
	mt753x_reg_write(priv, reg, val);
}

/* Indirect MDIO clause 22/45 access */
static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
			 u32 cmd, u32 st)
{
	ulong timeout;
	u32 val, timeout_ms;
	int ret = 0;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);

	timeout_ms = 100;
	timeout = get_timer(0);
	while (1) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);

		if ((val & PHY_ACS_ST) == 0)
			break;

		if (get_timer(timeout) > timeout_ms)
			return -ETIMEDOUT;
	}

	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
		ret = val & MDIO_RW_DATA_M;
	}

	return ret;
}

static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
			     MDIO_ST_C22);
}

static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
				u16 val)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
			     MDIO_ST_C22);
}

static int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			       u16 reg)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
			     MDIO_ST_C45);
}

static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
				u16 reg, u16 val)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return 0;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
			     MDIO_ST_C45);
}

static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_read(priv, addr, reg);
	else
		return priv->mmd_read(priv, addr, devad, reg);
}

static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			  u16 val)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_write(priv, addr, reg, val);
	else
		return priv->mmd_write(priv, addr, devad, reg, val);
}

static int mtk_mdio_register(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();
	int ret;

	if (!mdio_bus)
		return -ENOMEM;

	/* Assign MDIO access APIs according to the switch/phy */
	switch (priv->sw) {
	case SW_MT7530:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_ind_read;
		priv->mmd_write = mtk_mmd_ind_write;
		break;
	case SW_MT7531:
		priv->mii_read = mt7531_mii_ind_read;
		priv->mii_write = mt7531_mii_ind_write;
		priv->mmd_read = mt7531_mmd_ind_read;
		priv->mmd_write = mt7531_mmd_ind_write;
		break;
	default:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_read;
		priv->mmd_write = mtk_mmd_write;
	}

	mdio_bus->read = mtk_mdio_read;
	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);

	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;

	return 0;
}

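/*
 * Access MT753x switch core registers through MMD device 0x1f of the first
 * internal PHY
 */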
static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
}

static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
}

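/*
 * Set up the MT7530 core clock, switch PLL and TRGMII Tx clock for the
 * given xMII interface mode (only RGMII is handled here)
 */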
static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500MHz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}

static void mt7530_mac_control(struct mtk_eth_priv *priv, bool enable)
{
	u32 pmcr = FORCE_MODE;

	if (enable)
		pmcr = priv->mt753x_pmcr;

	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
}

static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val, txdrv;
	int i;

	if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Select 250MHz clk for RGMII mode */
		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
			       ETHSYS_TRGMII_CLK_SEL362_5, 0);

		txdrv = 8;
	} else {
		txdrv = 4;
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	priv->mt753x_pmcr = val;

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (txdrv << TD_DM_DRVP_S) |
				 (txdrv << TD_DM_DRVN_S));

	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}

static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1: Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, only a 25MHz XTAL is supported */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* Enable the 325MHz clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* Enable the 250MHz SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}

static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed(2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}

static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
{
	u32 val;

	if (port != 5) {
		printf("error: RGMII mode is not available for port %d\n",
		       port);
		return -EINVAL;
	}

	mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
	val |= GP_CLK_EN;
	val &= ~GP_MODE_M;
	val |= GP_MODE_RGMII << GP_MODE_S;
	val |= TXCLK_NO_REVERSE;
	val |= RXCLK_NO_DELAY;
	val &= ~CLK_SKEW_IN_M;
	val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
	val &= ~CLK_SKEW_OUT_M;
	val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
	mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);

	return 0;
}

static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}

static void mt7531_mac_control(struct mtk_eth_priv *priv, bool enable)
{
	u32 pmcr = FORCE_MODE_LNK;

	if (enable)
		pmcr = priv->mt753x_pmcr;

	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
}

static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* Port 5 supports either RGMII or SGMII; port 6 supports SGMII only */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	priv->mt753x_pmcr = pmcr;

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}

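/*
 * Reset the MT753x switch (via MCM reset control or the reset GPIO), run the
 * model-specific setup, then isolate the user ports from each other
 */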
static int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(priv->mt753x_reset_wait_time);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(priv->mt753x_reset_wait_time);
	}

	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}

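/*
 * Resolve flow control from the attached PHY and enable TX/RX on the XGMAC
 * port MCR (USXGMII path)
 */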
static void mtk_xphy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = mtk_gmac_read(priv, XGMAC_PORT_MCR(priv->gmac_id));
	mcr &= ~(XGMAC_FORCE_TX_FC | XGMAC_FORCE_RX_FC);

	if (priv->phydev->duplex) {
		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= XGMAC_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= XGMAC_FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mcr &= ~(XGMAC_TRX_DISABLE);
	mtk_gmac_write(priv, XGMAC_PORT_MCR(priv->gmac_id), mcr);
}

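/*
 * Program the GMAC port MCR according to the link parameters negotiated by
 * the attached PHY (speed, duplex and pause)
 */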
static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      DEL_RXFIFO_CLR |
	      BKOFF_EN | BACKPR_EN;

	switch (priv->phydev->speed) {
	case SPEED_10:
		mcr |= (SPEED_10M << FORCE_SPD_S);
		break;
	case SPEED_100:
		mcr |= (SPEED_100M << FORCE_SPD_S);
		break;
	case SPEED_1000:
	case SPEED_2500:
		mcr |= (SPEED_1000M << FORCE_SPD_S);
		break;
	};

	if (priv->phydev->link)
		mcr |= FORCE_LINK;

	if (priv->phydev->duplex) {
		mcr |= FORCE_DPX;

		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
}

static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	if (!priv->force_mode) {
		if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII)
			mtk_xphy_link_adjust(priv);
		else
			mtk_phy_link_adjust(priv);
	}

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static void mtk_sgmii_an_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN1 speed(1G) */
	clrsetbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
			SGMSYS_SPEED_2500, 0);

	/* Enable SGMII AN */
	setbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
		     SGMII_AN_ENABLE);

	/* SGMII AN mode setting */
	writel(SGMII_AN_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* SGMII PN SWAP setting */
	if (priv->pn_swap) {
		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
			     SGMII_PN_SWAP_TX_RX);
	}

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}

static void mtk_sgmii_force_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed(2.5G) */
	setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
		     SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* SGMII PN SWAP setting */
	if (priv->pn_swap) {
		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
			     SGMII_PN_SWAP_TX_RX);
	}

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}

static void mtk_xfi_pll_enable(struct mtk_eth_priv *priv)
{
	u32 val = 0;

	/* Add software workaround for USXGMII PLL TCL issue */
	regmap_write(priv->xfi_pll_regmap, XFI_PLL_ANA_GLB8,
		     RG_XFI_PLL_ANA_SWWA);

	regmap_read(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, &val);
	val |= RG_XFI_PLL_EN;
	regmap_write(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, val);
}

static void mtk_usxgmii_reset(struct mtk_eth_priv *priv)
{
	switch (priv->gmac_id) {
	case 1:
		regmap_write(priv->toprgu_regmap, 0xFC, 0x0000A004);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F0A004);
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
		break;
	case 2:
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00005002);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F05002);
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
		break;
	}

	mdelay(10);
}

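/*
 * Opaque PHYA initialization sequence for 10G USXGMII with autonegotiation;
 * the register offsets and values below are applied as-is (magic numbers)
 */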
static void mtk_usxgmii_setup_phya_an_10000(struct mtk_eth_priv *priv)
{
	regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6D);
	regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x30000000);
	ndelay(1020);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x10000000);
	ndelay(1020);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x00000000);

	regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
	regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
	regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
	regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
	regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
	regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
	regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
	regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
	regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
	regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
	regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
	regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
	regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
	regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
	regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
	regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
	regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
	regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
	regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
	regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
	regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
	regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
	regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
	regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
	regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
	regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
	regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
	regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
	regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x40704000);
	regmap_write(priv->xfi_pextp_regmap, 0x3050, 0xA8000000);
	regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x000000AA);
	regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
	regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
	regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
	udelay(150);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
	udelay(15);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
	udelay(100);
	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
	regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
	udelay(400);
}

static void mtk_usxgmii_an_init(struct mtk_eth_priv *priv)
{
	mtk_xfi_pll_enable(priv);
	mtk_usxgmii_reset(priv);
	mtk_usxgmii_setup_phya_an_10000(priv);
}

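/*
 * Select the GE interface mode (RGMII/SGMII/MII/RMII) for this GMAC and, in
 * force mode, program a fixed-speed port MCR
 */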
static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC2_U3_QPHY)) {
			mtk_infra_rmw(priv, USB_PHY_SWITCH_REG, QPHY_SEL_MASK,
				      SGMII_QPHY_SEL);
		}

		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
			mtk_sgmii_an_init(priv);
		else
			mtk_sgmii_force_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
		case SPEED_2500:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
	    !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}

static void mtk_xmac_init(struct mtk_eth_priv *priv)
{
	u32 sts;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_USXGMII:
		mtk_usxgmii_an_init(priv);
		break;
	default:
		break;
	}

	/* Set GMAC to the correct mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       0);

	if (priv->gmac_id == 1) {
		mtk_infra_rmw(priv, TOPMISC_NETSYS_PCS_MUX,
			      NETSYS_PCS_MUX_MASK, MUX_G2_USXGMII_SEL);
	} else if (priv->gmac_id == 2) {
		sts = mtk_gmac_read(priv, XGMAC_STS(priv->gmac_id));
		sts |= XGMAC_FORCE_LINK;
		mtk_gmac_write(priv, XGMAC_STS(priv->gmac_id), sts);
	}

	/* Force GMAC link down */
	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), FORCE_MODE);
}

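/*
 * Reset the PDMA, then build the TX/RX descriptor rings in non-cached memory
 * and point them at the driver's packet buffer pool
 */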
Weijie Gao23f17162018-12-20 16:12:53 +08001410static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
1411{
1412 char *pkt_base = priv->pkt_pool;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001413 struct mtk_tx_dma_v2 *txd;
1414 struct mtk_rx_dma_v2 *rxd;
Weijie Gao23f17162018-12-20 16:12:53 +08001415 int i;
1416
1417 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
1418 udelay(500);
1419
Weijie Gao7d928c32022-09-09 19:59:24 +08001420 memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
1421 memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
1422 memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
Weijie Gao23f17162018-12-20 16:12:53 +08001423
Frank Wunderlich47b14312020-01-31 10:23:29 +01001424 flush_dcache_range((ulong)pkt_base,
1425 (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
Weijie Gao23f17162018-12-20 16:12:53 +08001426
1427 priv->rx_dma_owner_idx0 = 0;
1428 priv->tx_cpu_owner_idx0 = 0;
1429
1430 for (i = 0; i < NUM_TX_DESC; i++) {
Weijie Gao7d928c32022-09-09 19:59:24 +08001431 txd = priv->tx_ring_noc + i * priv->soc->txd_size;
Weijie Gao23f17162018-12-20 16:12:53 +08001432
Weijie Gao7d928c32022-09-09 19:59:24 +08001433 txd->txd1 = virt_to_phys(pkt_base);
1434 txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001435
Weijie Gao76281942023-07-19 17:17:37 +08001436 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1437 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id == 2 ?
1438 15 : priv->gmac_id + 1);
1439 else if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001440 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
1441 else
1442 txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);
Weijie Gao7d928c32022-09-09 19:59:24 +08001443
Weijie Gao23f17162018-12-20 16:12:53 +08001444 pkt_base += PKTSIZE_ALIGN;
1445 }
1446
1447 for (i = 0; i < NUM_RX_DESC; i++) {
Weijie Gao7d928c32022-09-09 19:59:24 +08001448 rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
1449
1450 rxd->rxd1 = virt_to_phys(pkt_base);
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001451
Weijie Gao76281942023-07-19 17:17:37 +08001452 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1453 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001454 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1455 else
1456 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
Weijie Gao7d928c32022-09-09 19:59:24 +08001457
Weijie Gao23f17162018-12-20 16:12:53 +08001458 pkt_base += PKTSIZE_ALIGN;
1459 }
1460
1461 mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
1462 virt_to_phys(priv->tx_ring_noc));
1463 mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
1464 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1465
1466 mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
1467 virt_to_phys(priv->rx_ring_noc));
1468 mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
1469 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);
1470
1471 mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
1472}
1473
1474static int mtk_eth_start(struct udevice *dev)
1475{
1476 struct mtk_eth_priv *priv = dev_get_priv(dev);
Weijie Gao76281942023-07-19 17:17:37 +08001477 int i, ret;
Weijie Gao23f17162018-12-20 16:12:53 +08001478
1479 /* Reset FE */
1480 reset_assert(&priv->rst_fe);
1481 udelay(1000);
1482 reset_deassert(&priv->rst_fe);
1483 mdelay(10);
1484
Weijie Gao76281942023-07-19 17:17:37 +08001485 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1486 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001487 setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);
1488
Weijie Gao23f17162018-12-20 16:12:53 +08001489 /* Packets forward to PDMA */
1490 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
1491
Weijie Gao76281942023-07-19 17:17:37 +08001492 for (i = 0; i < priv->soc->gdma_count; i++) {
1493 if (i == priv->gmac_id)
1494 continue;
1495
1496 mtk_gdma_write(priv, i, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1497 }
1498
1499 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3)) {
1500 mtk_gdma_write(priv, priv->gmac_id, GDMA_EG_CTRL_REG,
1501 GDMA_CPU_BRIDGE_EN);
1502 }
Weijie Gao23f17162018-12-20 16:12:53 +08001503
1504 udelay(500);
1505
1506 mtk_eth_fifo_init(priv);
1507
Weijie Gaoc73d3872023-07-19 17:16:54 +08001508 if (priv->switch_mac_control)
1509 priv->switch_mac_control(priv, true);
1510
Weijie Gao23f17162018-12-20 16:12:53 +08001511 /* Start PHY */
1512 if (priv->sw == SW_NONE) {
1513 ret = mtk_phy_start(priv);
1514 if (ret)
1515 return ret;
1516 }
1517
1518 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1519 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1520 udelay(500);
1521
1522 return 0;
1523}
1524
1525static void mtk_eth_stop(struct udevice *dev)
1526{
1527 struct mtk_eth_priv *priv = dev_get_priv(dev);
1528
Weijie Gaoc73d3872023-07-19 17:16:54 +08001529 if (priv->switch_mac_control)
1530 priv->switch_mac_control(priv, false);
1531
Weijie Gao23f17162018-12-20 16:12:53 +08001532 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1533 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1534 udelay(500);
1535
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001536 wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
Weijie Gao23f17162018-12-20 16:12:53 +08001537 RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1538}
1539
1540static int mtk_eth_write_hwaddr(struct udevice *dev)
1541{
Simon Glassc69cda22020-12-03 16:55:20 -07001542 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001543 struct mtk_eth_priv *priv = dev_get_priv(dev);
1544 unsigned char *mac = pdata->enetaddr;
1545 u32 macaddr_lsb, macaddr_msb;
1546
1547 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1548 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1549 ((u32)mac[4] << 8) | (u32)mac[5];
1550
1551 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1552 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1553
1554 return 0;
1555}
1556
1557static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1558{
1559 struct mtk_eth_priv *priv = dev_get_priv(dev);
1560 u32 idx = priv->tx_cpu_owner_idx0;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001561 struct mtk_tx_dma_v2 *txd;
Weijie Gao23f17162018-12-20 16:12:53 +08001562 void *pkt_base;
1563
Weijie Gao7d928c32022-09-09 19:59:24 +08001564 txd = priv->tx_ring_noc + idx * priv->soc->txd_size;
1565
1566 if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
Weijie Gao23f17162018-12-20 16:12:53 +08001567 debug("mtk-eth: TX DMA descriptor ring is full\n");
1568 return -EPERM;
1569 }
1570
Weijie Gao7d928c32022-09-09 19:59:24 +08001571 pkt_base = (void *)phys_to_virt(txd->txd1);
Weijie Gao23f17162018-12-20 16:12:53 +08001572 memcpy(pkt_base, packet, length);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001573 flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
Weijie Gao23f17162018-12-20 16:12:53 +08001574 roundup(length, ARCH_DMA_MINALIGN));
1575
Weijie Gao76281942023-07-19 17:17:37 +08001576 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1577 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001578 txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
1579 else
1580 txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);
Weijie Gao23f17162018-12-20 16:12:53 +08001581
1582 priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1583 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1584
1585 return 0;
1586}
1587
1588static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1589{
1590 struct mtk_eth_priv *priv = dev_get_priv(dev);
1591 u32 idx = priv->rx_dma_owner_idx0;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001592 struct mtk_rx_dma_v2 *rxd;
Weijie Gao23f17162018-12-20 16:12:53 +08001593 uchar *pkt_base;
1594 u32 length;
1595
Weijie Gao7d928c32022-09-09 19:59:24 +08001596 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1597
1598 if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
Weijie Gao23f17162018-12-20 16:12:53 +08001599 debug("mtk-eth: RX DMA descriptor ring is empty\n");
1600 return -EAGAIN;
1601 }
1602
Weijie Gao76281942023-07-19 17:17:37 +08001603 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1604 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001605 length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
1606 else
1607 length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);
Weijie Gao7d928c32022-09-09 19:59:24 +08001608
1609 pkt_base = (void *)phys_to_virt(rxd->rxd1);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001610 invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
Weijie Gao23f17162018-12-20 16:12:53 +08001611 roundup(length, ARCH_DMA_MINALIGN));
1612
1613 if (packetp)
1614 *packetp = pkt_base;
1615
1616 return length;
1617}
1618
1619static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1620{
1621 struct mtk_eth_priv *priv = dev_get_priv(dev);
1622 u32 idx = priv->rx_dma_owner_idx0;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001623 struct mtk_rx_dma_v2 *rxd;
Weijie Gao23f17162018-12-20 16:12:53 +08001624
Weijie Gao7d928c32022-09-09 19:59:24 +08001625 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1626
Weijie Gao76281942023-07-19 17:17:37 +08001627 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1628 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001629 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1630 else
1631 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
Weijie Gao23f17162018-12-20 16:12:53 +08001632
1633 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1634 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1635
1636 return 0;
1637}
1638
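/*
 * mtk_eth_probe() - bring up the frame engine
 *
 * Map the frame engine and GMAC register windows, register the MDIO bus,
 * allocate uncached memory for the TX/RX descriptor rings, initialize the
 * MAC (XGMAC for USXGMII, GMAC otherwise) and finally probe either the
 * attached PHY or the MT753x switch, depending on the device tree.
 */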
1639static int mtk_eth_probe(struct udevice *dev)
1640{
Simon Glassc69cda22020-12-03 16:55:20 -07001641 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001642 struct mtk_eth_priv *priv = dev_get_priv(dev);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001643 ulong iobase = pdata->iobase;
Weijie Gao23f17162018-12-20 16:12:53 +08001644 int ret;
1645
1646 /* Frame Engine Register Base */
1647 priv->fe_base = (void *)iobase;
1648
1649 /* GMAC Register Base */
1650 priv->gmac_base = (void *)(iobase + GMAC_BASE);
1651
1652	/* Register the MDIO bus */
1653 ret = mtk_mdio_register(dev);
1654 if (ret)
1655 return ret;
1656
1657	/* Allocate uncached memory for the TX/RX DMA descriptor rings */
Weijie Gao7d928c32022-09-09 19:59:24 +08001658 priv->tx_ring_noc = (void *)
1659 noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
Weijie Gao23f17162018-12-20 16:12:53 +08001660 ARCH_DMA_MINALIGN);
Weijie Gao7d928c32022-09-09 19:59:24 +08001661 priv->rx_ring_noc = (void *)
1662 noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
Weijie Gao23f17162018-12-20 16:12:53 +08001663 ARCH_DMA_MINALIGN);
1664
1665 /* Set MAC mode */
Weijie Gaoba026eb2023-07-19 17:17:31 +08001666 if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII)
1667 mtk_xmac_init(priv);
1668 else
1669 mtk_mac_init(priv);
Weijie Gao23f17162018-12-20 16:12:53 +08001670
1671 /* Probe phy if switch is not specified */
1672 if (priv->sw == SW_NONE)
1673 return mtk_phy_probe(dev);
1674
1675 /* Initialize switch */
Landen Chao532de8d2020-02-18 16:49:37 +08001676 return mt753x_switch_init(priv);
Weijie Gao23f17162018-12-20 16:12:53 +08001677}
1678
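/*
 * mtk_eth_remove() - tear down the device: unregister and free the MDIO bus
 * and stop any DMA that may still be running.
 */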
1679static int mtk_eth_remove(struct udevice *dev)
1680{
1681 struct mtk_eth_priv *priv = dev_get_priv(dev);
1682
1683	/* Unregister and free the MDIO bus */
1684 mdio_unregister(priv->mdio_bus);
1685 mdio_free(priv->mdio_bus);
1686
1687 /* Stop possibly started DMA */
1688 mtk_eth_stop(dev);
1689
1690 return 0;
1691}
1692
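/*
 * mtk_eth_of_to_plat() - gather configuration from the device tree
 *
 * Collect the SoC match data, register base, ethsys (and, where the SoC
 * requires it, infracfg) syscon phandles, the "fe" reset, the PHY interface
 * mode, optional fixed-link parameters, the SGMII/USXGMII PCS phandles, and
 * either a "mediatek,switch" string (mt7530/mt7531) or a "phy-handle"
 * reference.
 *
 * For illustration only, a minimal node consumed by this parser could look
 * roughly like the sketch below; the property names match the reads in this
 * function, but the addresses, phandles and values are placeholders, not
 * taken from any real board:
 *
 *	eth@1b100000 {
 *		compatible = "mediatek,mt7622-eth";
 *		mediatek,ethsys = <&ethsys>;
 *		resets = <&ethsys 6>;
 *		reset-names = "fe";
 *		mediatek,gmac-id = <0>;
 *		phy-mode = "sgmii";
 *		mediatek,sgmiisys = <&sgmiisys>;
 *		mediatek,switch = "mt7531";
 *		fixed-link {
 *			speed = <1000>;
 *			full-duplex;
 *		};
 *	};
 */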
Simon Glassd1998a92020-12-03 16:55:21 -07001693static int mtk_eth_of_to_plat(struct udevice *dev)
Weijie Gao23f17162018-12-20 16:12:53 +08001694{
Simon Glassc69cda22020-12-03 16:55:20 -07001695 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001696 struct mtk_eth_priv *priv = dev_get_priv(dev);
1697 struct ofnode_phandle_args args;
1698 struct regmap *regmap;
1699 const char *str;
1700 ofnode subnode;
1701 int ret;
1702
Weijie Gao62596722022-09-09 19:59:21 +08001703 priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
1704 if (!priv->soc) {
1705 dev_err(dev, "missing soc compatible data\n");
1706 return -EINVAL;
1707 }
Weijie Gao23f17162018-12-20 16:12:53 +08001708
Weijie Gao528e4832022-05-20 11:23:31 +08001709 pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001710
1711 /* get corresponding ethsys phandle */
1712 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1713 &args);
1714 if (ret)
1715 return ret;
1716
Weijie Gao86062e72022-05-20 11:23:37 +08001717 priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1718 if (IS_ERR(priv->ethsys_regmap))
1719 return PTR_ERR(priv->ethsys_regmap);
Weijie Gao23f17162018-12-20 16:12:53 +08001720
Weijie Gao585a1a42023-07-19 17:17:22 +08001721 if (MTK_HAS_CAPS(priv->soc->caps, MTK_INFRA)) {
1722 /* get corresponding infracfg phandle */
1723 ret = dev_read_phandle_with_args(dev, "mediatek,infracfg",
1724 NULL, 0, 0, &args);
1725
1726 if (ret)
1727 return ret;
1728
1729 priv->infra_regmap = syscon_node_to_regmap(args.node);
1730 if (IS_ERR(priv->infra_regmap))
1731 return PTR_ERR(priv->infra_regmap);
1732 }
1733
Weijie Gao23f17162018-12-20 16:12:53 +08001734 /* Reset controllers */
1735 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1736 if (ret) {
1737 printf("error: Unable to get reset ctrl for frame engine\n");
1738 return ret;
1739 }
1740
1741 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1742
1743 /* Interface mode is required */
Marek Behún123ca112022-04-07 00:33:01 +02001744 pdata->phy_interface = dev_read_phy_mode(dev);
1745 priv->phy_interface = pdata->phy_interface;
Marek Behúnffb0f6f2022-04-07 00:33:03 +02001746 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Weijie Gao23f17162018-12-20 16:12:53 +08001747 printf("error: phy-mode is not set\n");
1748 return -EINVAL;
1749 }
1750
1751 /* Force mode or autoneg */
1752 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
1753 if (ofnode_valid(subnode)) {
1754 priv->force_mode = 1;
1755 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
1756 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
1757
1758 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
Weijie Gaobd70f3c2023-07-19 17:17:13 +08001759 priv->speed != SPEED_1000 && priv->speed != SPEED_2500 &&
1760 priv->speed != SPEED_10000) {
Weijie Gao23f17162018-12-20 16:12:53 +08001761 printf("error: no valid speed set in fixed-link\n");
1762 return -EINVAL;
1763 }
1764 }
1765
Weijie Gaobd70f3c2023-07-19 17:17:13 +08001766 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII ||
1767 priv->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
MarkLeeb4ef49a2020-01-21 19:31:57 +08001768 /* get corresponding sgmii phandle */
1769 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
1770 NULL, 0, 0, &args);
1771 if (ret)
1772 return ret;
1773
1774 regmap = syscon_node_to_regmap(args.node);
1775
1776 if (IS_ERR(regmap))
1777 return PTR_ERR(regmap);
1778
1779 priv->sgmii_base = regmap_get_range(regmap, 0);
1780
1781 if (!priv->sgmii_base) {
1782 dev_err(dev, "Unable to find sgmii\n");
1783 return -ENODEV;
1784 }
Weijie Gao29a48bf2022-09-09 19:59:28 +08001785
1786 priv->pn_swap = ofnode_read_bool(args.node, "pn_swap");
Weijie Gaoba026eb2023-07-19 17:17:31 +08001787 } else if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII) {
1788 /* get corresponding usxgmii phandle */
1789 ret = dev_read_phandle_with_args(dev, "mediatek,usxgmiisys",
1790 NULL, 0, 0, &args);
1791 if (ret)
1792 return ret;
1793
1794 priv->usxgmii_regmap = syscon_node_to_regmap(args.node);
1795 if (IS_ERR(priv->usxgmii_regmap))
1796 return PTR_ERR(priv->usxgmii_regmap);
1797
1798 /* get corresponding xfi_pextp phandle */
1799 ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pextp",
1800 NULL, 0, 0, &args);
1801 if (ret)
1802 return ret;
1803
1804 priv->xfi_pextp_regmap = syscon_node_to_regmap(args.node);
1805 if (IS_ERR(priv->xfi_pextp_regmap))
1806 return PTR_ERR(priv->xfi_pextp_regmap);
1807
1808 /* get corresponding xfi_pll phandle */
1809 ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pll",
1810 NULL, 0, 0, &args);
1811 if (ret)
1812 return ret;
1813
1814 priv->xfi_pll_regmap = syscon_node_to_regmap(args.node);
1815 if (IS_ERR(priv->xfi_pll_regmap))
1816 return PTR_ERR(priv->xfi_pll_regmap);
1817
1818 /* get corresponding toprgu phandle */
1819 ret = dev_read_phandle_with_args(dev, "mediatek,toprgu",
1820 NULL, 0, 0, &args);
1821 if (ret)
1822 return ret;
1823
1824 priv->toprgu_regmap = syscon_node_to_regmap(args.node);
1825 if (IS_ERR(priv->toprgu_regmap))
1826 return PTR_ERR(priv->toprgu_regmap);
MarkLeeb4ef49a2020-01-21 19:31:57 +08001827 }
1828
Weijie Gao23f17162018-12-20 16:12:53 +08001829 /* check for switch first, otherwise phy will be used */
1830 priv->sw = SW_NONE;
1831 priv->switch_init = NULL;
Weijie Gaoc73d3872023-07-19 17:16:54 +08001832 priv->switch_mac_control = NULL;
Weijie Gao23f17162018-12-20 16:12:53 +08001833 str = dev_read_string(dev, "mediatek,switch");
1834
1835 if (str) {
1836 if (!strcmp(str, "mt7530")) {
1837 priv->sw = SW_MT7530;
1838 priv->switch_init = mt7530_setup;
Weijie Gaoc73d3872023-07-19 17:16:54 +08001839 priv->switch_mac_control = mt7530_mac_control;
Landen Chao532de8d2020-02-18 16:49:37 +08001840 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
Weijie Gaoc41a0582023-07-19 17:16:59 +08001841 priv->mt753x_reset_wait_time = 1000;
Landen Chao532de8d2020-02-18 16:49:37 +08001842 } else if (!strcmp(str, "mt7531")) {
1843 priv->sw = SW_MT7531;
1844 priv->switch_init = mt7531_setup;
Weijie Gaoc73d3872023-07-19 17:16:54 +08001845 priv->switch_mac_control = mt7531_mac_control;
Landen Chao532de8d2020-02-18 16:49:37 +08001846 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
Weijie Gaoc41a0582023-07-19 17:16:59 +08001847 priv->mt753x_reset_wait_time = 200;
Weijie Gao23f17162018-12-20 16:12:53 +08001848 } else {
1849 printf("error: unsupported switch\n");
1850 return -EINVAL;
1851 }
1852
1853 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
1854 if (priv->mcm) {
1855 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
1856 if (ret) {
1857 printf("error: no reset ctrl for mcm\n");
1858 return ret;
1859 }
1860 } else {
1861 gpio_request_by_name(dev, "reset-gpios", 0,
1862 &priv->rst_gpio, GPIOD_IS_OUT);
1863 }
1864 } else {
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001865 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
1866 0, &args);
1867 if (ret) {
Weijie Gao23f17162018-12-20 16:12:53 +08001868 printf("error: phy-handle is not specified\n");
1869 return ret;
1870 }
1871
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001872 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
Weijie Gao23f17162018-12-20 16:12:53 +08001873 if (priv->phy_addr < 0) {
1874 printf("error: phy address is not specified\n");
1875			return -ENODEV;
1876 }
1877 }
1878
1879 return 0;
1880}
1881
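/*
 * Per-SoC match data: capability flags, the ANA_RGC3 offset in sgmiisys,
 * the number of GDMAs, the PDMA register base and the DMA descriptor sizes.
 * Selected through the .data field of the compatible table below.
 */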
Weijie Gao29a48bf2022-09-09 19:59:28 +08001882static const struct mtk_soc_data mt7986_data = {
1883 .caps = MT7986_CAPS,
1884 .ana_rgc3 = 0x128,
Weijie Gao76281942023-07-19 17:17:37 +08001885 .gdma_count = 2,
Weijie Gao29a48bf2022-09-09 19:59:28 +08001886 .pdma_base = PDMA_V2_BASE,
1887 .txd_size = sizeof(struct mtk_tx_dma_v2),
1888 .rxd_size = sizeof(struct mtk_rx_dma_v2),
1889};
1890
1891static const struct mtk_soc_data mt7981_data = {
Weijie Gao585a1a42023-07-19 17:17:22 +08001892 .caps = MT7981_CAPS,
Weijie Gao29a48bf2022-09-09 19:59:28 +08001893 .ana_rgc3 = 0x128,
Weijie Gao76281942023-07-19 17:17:37 +08001894 .gdma_count = 2,
Weijie Gao29a48bf2022-09-09 19:59:28 +08001895 .pdma_base = PDMA_V2_BASE,
1896 .txd_size = sizeof(struct mtk_tx_dma_v2),
1897 .rxd_size = sizeof(struct mtk_rx_dma_v2),
1898};
1899
Weijie Gao62596722022-09-09 19:59:21 +08001900static const struct mtk_soc_data mt7629_data = {
1901 .ana_rgc3 = 0x128,
Weijie Gao76281942023-07-19 17:17:37 +08001902 .gdma_count = 2,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001903 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001904 .txd_size = sizeof(struct mtk_tx_dma),
1905 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001906};
1907
1908static const struct mtk_soc_data mt7623_data = {
1909 .caps = MT7623_CAPS,
Weijie Gao76281942023-07-19 17:17:37 +08001910 .gdma_count = 2,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001911 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001912 .txd_size = sizeof(struct mtk_tx_dma),
1913 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001914};
1915
1916static const struct mtk_soc_data mt7622_data = {
1917 .ana_rgc3 = 0x2028,
Weijie Gao76281942023-07-19 17:17:37 +08001918 .gdma_count = 2,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001919 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001920 .txd_size = sizeof(struct mtk_tx_dma),
1921 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001922};
1923
1924static const struct mtk_soc_data mt7621_data = {
1925 .caps = MT7621_CAPS,
Weijie Gao76281942023-07-19 17:17:37 +08001926 .gdma_count = 2,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001927 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001928 .txd_size = sizeof(struct mtk_tx_dma),
1929 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001930};
1931
Weijie Gao23f17162018-12-20 16:12:53 +08001932static const struct udevice_id mtk_eth_ids[] = {
Weijie Gao29a48bf2022-09-09 19:59:28 +08001933 { .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
1934 { .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
Weijie Gao62596722022-09-09 19:59:21 +08001935 { .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
1936 { .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
1937 { .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
1938 { .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
Weijie Gao23f17162018-12-20 16:12:53 +08001939 {}
1940};
1941
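/* Ethernet operations exported to the U-Boot driver model network stack */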
1942static const struct eth_ops mtk_eth_ops = {
1943 .start = mtk_eth_start,
1944 .stop = mtk_eth_stop,
1945 .send = mtk_eth_send,
1946 .recv = mtk_eth_recv,
1947 .free_pkt = mtk_eth_free_pkt,
1948 .write_hwaddr = mtk_eth_write_hwaddr,
1949};
1950
1951U_BOOT_DRIVER(mtk_eth) = {
1952 .name = "mtk-eth",
1953 .id = UCLASS_ETH,
1954 .of_match = mtk_eth_ids,
Simon Glassd1998a92020-12-03 16:55:21 -07001955 .of_to_plat = mtk_eth_of_to_plat,
Simon Glasscaa4daa2020-12-03 16:55:18 -07001956 .plat_auto = sizeof(struct eth_pdata),
Weijie Gao23f17162018-12-20 16:12:53 +08001957 .probe = mtk_eth_probe,
1958 .remove = mtk_eth_remove,
1959 .ops = &mtk_eth_ops,
Simon Glass41575d82020-12-03 16:55:17 -07001960 .priv_auto = sizeof(struct mtk_eth_priv),
Weijie Gao23f17162018-12-20 16:12:53 +08001961 .flags = DM_FLAG_ALLOC_PRIV_DMA,
1962};