// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 * Author: Mark Lee <mark-mc.lee@mediatek.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/mdio.h>
#include <linux/mii.h>

#include "mtk_eth.h"

#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
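/*
 * Sizing note (assuming the common case of PKTSIZE_ALIGN = 1536 bytes): the
 * packet pool spans (24 + 24) * 1536 = 73728 bytes, one buffer per TX/RX
 * descriptor.
 */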

#define MT753X_NUM_PHYS		5
#define MT753X_NUM_PORTS	7
#define MT753X_DFL_SMI_ADDR	31
#define MT753X_SMI_ADDR_MASK	0x1f

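/*
 * Illustrative example (arbitrary values): with a PHY base of 8, PHY index 3
 * maps to MDIO address (8 + 3) & 0x1f = 11; the mask keeps the result inside
 * the 5-bit MDIO address space.
 */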
#define MT753X_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

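/*
 * GDMA ingress control presets used in mtk_eth_start(): all keep the GDM
 * checksum checks (GDM_*CS_EN) enabled; GDMA_FWD_TO_CPU steers every traffic
 * class (my-MAC/broadcast/multicast/unknown) to the PDMA, GDMA_BRIDGE_TO_CPU
 * is the variant used for the MT7988 built-in switch path, and
 * GDMA_FWD_DISCARD drops everything on unused GDMAs.
 */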
#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_BRIDGE_TO_CPU \
	(0xC0000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))

enum mtk_switch {
	SW_NONE,
	SW_MT7530,
	SW_MT7531,
	SW_MT7988,
};

/* struct mtk_soc_data - This is the structure holding all differences
 *			 among various platforms
 * @caps:	Flags showing the extra capabilities of the SoC
 * @ana_rgc3:	The offset of register ANA_RGC3 within the sgmiisys syscon
 * @gdma_count:	Number of GDMAs
 * @pdma_base:	Register base of the PDMA block
 * @txd_size:	Tx DMA descriptor size.
 * @rxd_size:	Rx DMA descriptor size.
 */
struct mtk_soc_data {
	u32 caps;
	u32 ana_rgc3;
	u32 gdma_count;
	u32 pdma_base;
	u32 txd_size;
	u32 rxd_size;
};
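/*
 * A per-SoC instance (defined elsewhere in this driver) is expected to look
 * roughly like the sketch below; the field values here are placeholders, not
 * taken from any real SoC:
 *
 *	static const struct mtk_soc_data example_soc_data = {
 *		.caps = MTK_NETSYS_V2,
 *		.ana_rgc3 = 0x128,
 *		.gdma_count = 2,
 *		.pdma_base = 0x2000,
 *		.txd_size = sizeof(struct mtk_tx_dma_v2),
 *		.rxd_size = sizeof(struct mtk_rx_dma_v2),
 *	};
 */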

struct mtk_eth_priv {
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	void *tx_ring_noc;
	void *rx_ring_noc;

	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *sgmii_base;
	void __iomem *gsw_base;

	struct regmap *ethsys_regmap;

	struct regmap *infra_regmap;

	struct regmap *usxgmii_regmap;
	struct regmap *xfi_pextp_regmap;
	struct regmap *xfi_pll_regmap;
	struct regmap *toprgu_regmap;

	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	const struct mtk_soc_data *soc;
	int gmac_id;
	int force_mode;
	int speed;
	int duplex;
	bool pn_swap;

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	void (*switch_mac_control)(struct mtk_eth_priv *priv, bool enable);
	u32 mt753x_smi_addr;
	u32 mt753x_phy_base;
	u32 mt753x_pmcr;
	u32 mt753x_reset_wait_time;

	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};

static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + priv->soc->pdma_base + reg);
}

static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
}

static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
			   u32 val)
{
	u32 gdma_base;

	if (no == 2)
		gdma_base = GDMA3_BASE;
	else if (no == 1)
		gdma_base = GDMA2_BASE;
	else
		gdma_base = GDMA1_BASE;

	writel(val, priv->fe_base + gdma_base + reg);
}

static void mtk_fe_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->fe_base + reg, clr, set);
}

static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}

static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	uint val;

	regmap_read(priv->ethsys_regmap, reg, &val);
	val &= ~clr;
	val |= set;
	regmap_write(priv->ethsys_regmap, reg, val);
}

static void mtk_infra_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			  u32 set)
{
	uint val;

	regmap_read(priv->infra_regmap, reg, &val);
	val &= ~clr;
	val |= set;
	regmap_write(priv->infra_regmap, reg, val);
}

static u32 mtk_gsw_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gsw_base + reg);
}

static void mtk_gsw_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gsw_base + reg);
}

/* Direct MDIO clause 22/45 access via SoC */
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}

/* Direct MDIO clause 22 read via SoC */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}

/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}

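/*
 * The indirect helpers below drive the standard MMD (Clause 45 over Clause
 * 22) access sequence through the PHY's MMD access control and address/data
 * registers: select the MMD in address mode, write the register address,
 * switch to data mode, then read or write the data register.
 */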
/* Indirect MDIO clause 45 read via MII registers */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/* Indirect MDIO clause 45 write via MII registers */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}

/*
 * MT7530 Internal Register Address Bits
 * -------------------------------------------------------------------
 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
 * |----------------------------------------|---------------|--------|
 * |              Page Address              |  Reg Address  | Unused |
 * -------------------------------------------------------------------
 */

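/*
 * Worked example (arbitrary offset, for illustration only): for an internal
 * register offset of 0x1fd4, mt753x_reg_read() below writes the page address
 * 0x1fd4 >> 6 = 0x7f to MII register 0x1f, then accesses the 32-bit value via
 * MII register (0x1fd4 >> 2) & 0xf = 0x5 (low word) and 0x10 (high word).
 */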
static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	if (priv->sw == SW_MT7988) {
		*data = mtk_gsw_read(priv, reg);
		return 0;
	}

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}

static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	if (priv->sw == SW_MT7988) {
		mtk_gsw_write(priv, reg, data);
		return 0;
	}

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
}

static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	u32 val;

	mt753x_reg_read(priv, reg, &val);
	val &= ~clr;
	val |= set;
	mt753x_reg_write(priv, reg, val);
}

/* Indirect MDIO clause 22/45 access */
static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
			 u32 cmd, u32 st)
{
	ulong timeout;
	u32 val, timeout_ms;
	int ret = 0;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);

	timeout_ms = 100;
	timeout = get_timer(0);
	while (1) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);

		if ((val & PHY_ACS_ST) == 0)
			break;

		if (get_timer(timeout) > timeout_ms)
			return -ETIMEDOUT;
	}

	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
		ret = val & MDIO_RW_DATA_M;
	}

	return ret;
}

static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
			     MDIO_ST_C22);
}

static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
				u16 val)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
			     MDIO_ST_C22);
}

static int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			       u16 reg)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
			     MDIO_ST_C45);
}

static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
				u16 reg, u16 val)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return 0;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
			     MDIO_ST_C45);
}

static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_read(priv, addr, reg);
	else
		return priv->mmd_read(priv, addr, devad, reg);
}

static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			  u16 val)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_write(priv, addr, reg, val);
	else
		return priv->mmd_write(priv, addr, devad, reg, val);
}

static int mtk_mdio_register(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();
	int ret;

	if (!mdio_bus)
		return -ENOMEM;

	/* Assign MDIO access APIs according to the switch/phy */
	switch (priv->sw) {
	case SW_MT7530:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_ind_read;
		priv->mmd_write = mtk_mmd_ind_write;
		break;
	case SW_MT7531:
	case SW_MT7988:
		priv->mii_read = mt7531_mii_ind_read;
		priv->mii_write = mt7531_mii_ind_write;
		priv->mmd_read = mt7531_mmd_ind_read;
		priv->mmd_write = mt7531_mmd_ind_write;
		break;
	default:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_read;
		priv->mmd_write = mtk_mmd_write;
	}

	mdio_bus->read = mtk_mdio_read;
	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);

	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;

	return 0;
}

static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
}

static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
}

static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500MHz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}

static void mt7530_mac_control(struct mtk_eth_priv *priv, bool enable)
{
	u32 pmcr = FORCE_MODE;

	if (enable)
		pmcr = priv->mt753x_pmcr;

	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
}

static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val, txdrv;
	int i;

	if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Select 250MHz clk for RGMII mode */
		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
			       ETHSYS_TRGMII_CLK_SEL362_5, 0);

		txdrv = 8;
	} else {
		txdrv = 4;
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	priv->mt753x_pmcr = val;

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (txdrv << TD_DM_DRVP_S) |
				 (txdrv << TD_DM_DRVN_S));

	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}

static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1: Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, only XTAL 25MHz is supported */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* add enable 325M clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* add enable 250SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}

static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed (2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}

static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
{
	u32 val;

	if (port != 5) {
		printf("error: RGMII mode is not available for port %d\n",
		       port);
		return -EINVAL;
	}

	mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
	val |= GP_CLK_EN;
	val &= ~GP_MODE_M;
	val |= GP_MODE_RGMII << GP_MODE_S;
	val |= TXCLK_NO_REVERSE;
	val |= RXCLK_NO_DELAY;
	val &= ~CLK_SKEW_IN_M;
	val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
	val &= ~CLK_SKEW_OUT_M;
	val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
	mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);

	return 0;
}

static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}

static void mt7531_mac_control(struct mtk_eth_priv *priv, bool enable)
{
	u32 pmcr = FORCE_MODE_LNK;

	if (enable)
		pmcr = priv->mt753x_pmcr;

	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
}

static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* Port 5 supports either RGMII or SGMII; port 6 supports SGMII only. */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	priv->mt753x_pmcr = pmcr;

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}

static void mt7988_phy_setting(struct mtk_eth_priv *priv)
{
	u16 val;
	u32 i;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);
	}
}

static void mt7988_mac_control(struct mtk_eth_priv *priv, bool enable)
{
	u32 pmcr = FORCE_MODE_LNK;

	if (enable)
		pmcr = priv->mt753x_pmcr;

	mt753x_reg_write(priv, PMCR_REG(6), pmcr);
}

static int mt7988_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 pmcr;
	int i;

	priv->gsw_base = regmap_get_range(priv->ethsys_regmap, 0) + GSW_BASE;

	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_USXGMII:
		/* Use CPU bridge instead of actual USXGMII path */

		/* Set GDM1 no drop */
		mtk_fe_rmw(priv, PSE_NO_DROP_CFG_REG, 0, PSE_NO_DROP_GDM1);

		/* Enable GDM1 to GSW CPU bridge */
		mtk_gmac_rmw(priv, GMAC_MAC_MISC_REG, 0, BIT(0));

		/* XGMAC force link up */
		mtk_gmac_rmw(priv, GMAC_XGMAC_STS_REG, 0, P1_XGMAC_FORCE_LINK);

		/* Setup GSW CPU bridge IPG */
		mtk_gmac_rmw(priv, GMAC_GSW_CFG_REG, GSWTX_IPG_M | GSWRX_IPG_M,
			     (0xB << GSWTX_IPG_S) | (0xB << GSWRX_IPG_S));
		break;
	default:
		printf("Error: MT7988 GSW does not support %s interface\n",
		       phy_string_for_interface(priv->phy_interface));
		break;
	}

	pmcr = MT7988_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	priv->mt753x_pmcr = pmcr;

	/* Keep MAC link down before starting eth */
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7988_phy_setting(priv);

	return 0;
}

static int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(priv->mt753x_reset_wait_time);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(priv->mt753x_reset_wait_time);
	}

	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}

static void mtk_xphy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = mtk_gmac_read(priv, XGMAC_PORT_MCR(priv->gmac_id));
	mcr &= ~(XGMAC_FORCE_TX_FC | XGMAC_FORCE_RX_FC);

	if (priv->phydev->duplex) {
		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= XGMAC_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= XGMAC_FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mcr &= ~(XGMAC_TRX_DISABLE);
	mtk_gmac_write(priv, XGMAC_PORT_MCR(priv->gmac_id), mcr);
}

static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      DEL_RXFIFO_CLR |
	      BKOFF_EN | BACKPR_EN;

	switch (priv->phydev->speed) {
	case SPEED_10:
		mcr |= (SPEED_10M << FORCE_SPD_S);
		break;
	case SPEED_100:
		mcr |= (SPEED_100M << FORCE_SPD_S);
		break;
	case SPEED_1000:
	case SPEED_2500:
		mcr |= (SPEED_1000M << FORCE_SPD_S);
		break;
	};

	if (priv->phydev->link)
		mcr |= FORCE_LINK;

	if (priv->phydev->duplex) {
		mcr |= FORCE_DPX;

		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
}

static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	if (!priv->force_mode) {
		if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII)
			mtk_xphy_link_adjust(priv);
		else
			mtk_phy_link_adjust(priv);
	}

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static void mtk_sgmii_an_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN1 speed (1G) */
	clrsetbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
			SGMSYS_SPEED_2500, 0);

	/* Enable SGMII AN */
	setbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
		     SGMII_AN_ENABLE);

	/* SGMII AN mode setting */
	writel(SGMII_AN_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* SGMII PN SWAP setting */
	if (priv->pn_swap) {
		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
			     SGMII_PN_SWAP_TX_RX);
	}

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}

static void mtk_sgmii_force_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed (2.5G) */
	setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
		     SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* SGMII PN SWAP setting */
	if (priv->pn_swap) {
		setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
			     SGMII_PN_SWAP_TX_RX);
	}

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}

static void mtk_xfi_pll_enable(struct mtk_eth_priv *priv)
{
	u32 val = 0;

	/* Add software workaround for USXGMII PLL TCL issue */
	regmap_write(priv->xfi_pll_regmap, XFI_PLL_ANA_GLB8,
		     RG_XFI_PLL_ANA_SWWA);

	regmap_read(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, &val);
	val |= RG_XFI_PLL_EN;
	regmap_write(priv->xfi_pll_regmap, XFI_PLL_DIG_GLB8, val);
}

static void mtk_usxgmii_reset(struct mtk_eth_priv *priv)
{
	switch (priv->gmac_id) {
	case 1:
		regmap_write(priv->toprgu_regmap, 0xFC, 0x0000A004);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F0A004);
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
		break;
	case 2:
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00005002);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F05002);
		regmap_write(priv->toprgu_regmap, 0xFC, 0x00000000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x88F00000);
		regmap_write(priv->toprgu_regmap, 0x18, 0x00F00000);
		break;
	}

	mdelay(10);
}

static void mtk_usxgmii_setup_phya_an_10000(struct mtk_eth_priv *priv)
{
	regmap_write(priv->usxgmii_regmap, 0x810, 0x000FFE6D);
	regmap_write(priv->usxgmii_regmap, 0x818, 0x07B1EC7B);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x30000000);
	ndelay(1020);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x10000000);
	ndelay(1020);
	regmap_write(priv->usxgmii_regmap, 0x80C, 0x00000000);

	regmap_write(priv->xfi_pextp_regmap, 0x9024, 0x00C9071C);
	regmap_write(priv->xfi_pextp_regmap, 0x2020, 0xAA8585AA);
	regmap_write(priv->xfi_pextp_regmap, 0x2030, 0x0C020707);
	regmap_write(priv->xfi_pextp_regmap, 0x2034, 0x0E050F0F);
	regmap_write(priv->xfi_pextp_regmap, 0x2040, 0x00140032);
	regmap_write(priv->xfi_pextp_regmap, 0x50F0, 0x00C014AA);
	regmap_write(priv->xfi_pextp_regmap, 0x50E0, 0x3777C12B);
	regmap_write(priv->xfi_pextp_regmap, 0x506C, 0x005F9CFF);
	regmap_write(priv->xfi_pextp_regmap, 0x5070, 0x9D9DFAFA);
	regmap_write(priv->xfi_pextp_regmap, 0x5074, 0x27273F3F);
	regmap_write(priv->xfi_pextp_regmap, 0x5078, 0xA7883C68);
	regmap_write(priv->xfi_pextp_regmap, 0x507C, 0x11661166);
	regmap_write(priv->xfi_pextp_regmap, 0x5080, 0x0E000AAF);
	regmap_write(priv->xfi_pextp_regmap, 0x5084, 0x08080D0D);
	regmap_write(priv->xfi_pextp_regmap, 0x5088, 0x02030909);
	regmap_write(priv->xfi_pextp_regmap, 0x50E4, 0x0C0C0000);
	regmap_write(priv->xfi_pextp_regmap, 0x50E8, 0x04040000);
	regmap_write(priv->xfi_pextp_regmap, 0x50EC, 0x0F0F0C06);
	regmap_write(priv->xfi_pextp_regmap, 0x50A8, 0x506E8C8C);
	regmap_write(priv->xfi_pextp_regmap, 0x6004, 0x18190000);
	regmap_write(priv->xfi_pextp_regmap, 0x00F8, 0x01423342);
	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F20);
	regmap_write(priv->xfi_pextp_regmap, 0x0030, 0x00050C00);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x02002800);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000020);
	regmap_write(priv->xfi_pextp_regmap, 0x3028, 0x00008A01);
	regmap_write(priv->xfi_pextp_regmap, 0x302C, 0x0000A884);
	regmap_write(priv->xfi_pextp_regmap, 0x3024, 0x00083002);
	regmap_write(priv->xfi_pextp_regmap, 0x3010, 0x00022220);
	regmap_write(priv->xfi_pextp_regmap, 0x5064, 0x0F020A01);
	regmap_write(priv->xfi_pextp_regmap, 0x50B4, 0x06100600);
	regmap_write(priv->xfi_pextp_regmap, 0x3048, 0x40704000);
	regmap_write(priv->xfi_pextp_regmap, 0x3050, 0xA8000000);
	regmap_write(priv->xfi_pextp_regmap, 0x3054, 0x000000AA);
	regmap_write(priv->xfi_pextp_regmap, 0x306C, 0x00000F00);
	regmap_write(priv->xfi_pextp_regmap, 0xA060, 0x00040000);
	regmap_write(priv->xfi_pextp_regmap, 0x90D0, 0x00000001);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200E800);
	udelay(150);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C111);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0200C101);
	udelay(15);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C111);
	ndelay(1020);
	regmap_write(priv->xfi_pextp_regmap, 0x0070, 0x0202C101);
	udelay(100);
	regmap_write(priv->xfi_pextp_regmap, 0x30B0, 0x00000030);
	regmap_write(priv->xfi_pextp_regmap, 0x00F4, 0x80201F00);
	regmap_write(priv->xfi_pextp_regmap, 0x3040, 0x30000000);
	udelay(400);
}

static void mtk_usxgmii_an_init(struct mtk_eth_priv *priv)
{
	mtk_xfi_pll_enable(priv);
	mtk_usxgmii_reset(priv);
	mtk_usxgmii_setup_phya_an_10000(priv);
}

static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC2_U3_QPHY)) {
			mtk_infra_rmw(priv, USB_PHY_SWITCH_REG, QPHY_SEL_MASK,
				      SGMII_QPHY_SEL);
		}

		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
			mtk_sgmii_an_init(priv);
		else
			mtk_sgmii_force_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
		case SPEED_2500:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
	    !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}

static void mtk_xmac_init(struct mtk_eth_priv *priv)
{
	u32 sts;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_USXGMII:
		mtk_usxgmii_an_init(priv);
		break;
	default:
		break;
	}

	/* Set GMAC to the correct mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       0);

	if (priv->gmac_id == 1) {
		mtk_infra_rmw(priv, TOPMISC_NETSYS_PCS_MUX,
			      NETSYS_PCS_MUX_MASK, MUX_G2_USXGMII_SEL);
	} else if (priv->gmac_id == 2) {
		sts = mtk_gmac_read(priv, XGMAC_STS(priv->gmac_id));
		sts |= XGMAC_FORCE_LINK;
		mtk_gmac_write(priv, XGMAC_STS(priv->gmac_id), sts);
	}

	/* Force GMAC link down */
	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), FORCE_MODE);
}

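/*
 * Descriptive note on the ring layout set up by mtk_eth_fifo_init() below:
 * the TX and RX rings (tx_ring_noc/rx_ring_noc, normally placed in
 * non-cached memory) hold NUM_TX_DESC and NUM_RX_DESC descriptors, and each
 * descriptor points at one PKTSIZE_ALIGN-sized buffer inside the cached
 * pkt_pool, TX buffers first, RX buffers after them.
 */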
Weijie Gao23f17162018-12-20 16:12:53 +08001545static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
1546{
1547 char *pkt_base = priv->pkt_pool;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001548 struct mtk_tx_dma_v2 *txd;
1549 struct mtk_rx_dma_v2 *rxd;
Weijie Gao23f17162018-12-20 16:12:53 +08001550 int i;
1551
1552 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
1553 udelay(500);
1554
Weijie Gao7d928c32022-09-09 19:59:24 +08001555 memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
1556 memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
1557 memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);
Weijie Gao23f17162018-12-20 16:12:53 +08001558
Frank Wunderlich47b14312020-01-31 10:23:29 +01001559 flush_dcache_range((ulong)pkt_base,
1560 (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));
Weijie Gao23f17162018-12-20 16:12:53 +08001561
1562 priv->rx_dma_owner_idx0 = 0;
1563 priv->tx_cpu_owner_idx0 = 0;
1564
1565 for (i = 0; i < NUM_TX_DESC; i++) {
Weijie Gao7d928c32022-09-09 19:59:24 +08001566 txd = priv->tx_ring_noc + i * priv->soc->txd_size;
Weijie Gao23f17162018-12-20 16:12:53 +08001567
Weijie Gao7d928c32022-09-09 19:59:24 +08001568 txd->txd1 = virt_to_phys(pkt_base);
1569 txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001570
Weijie Gao76281942023-07-19 17:17:37 +08001571 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
1572 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id == 2 ?
1573 15 : priv->gmac_id + 1);
1574 else if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001575 txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
1576 else
1577 txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);
Weijie Gao7d928c32022-09-09 19:59:24 +08001578
Weijie Gao23f17162018-12-20 16:12:53 +08001579 pkt_base += PKTSIZE_ALIGN;
1580 }
1581
1582 for (i = 0; i < NUM_RX_DESC; i++) {
Weijie Gao7d928c32022-09-09 19:59:24 +08001583 rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;
1584
1585 rxd->rxd1 = virt_to_phys(pkt_base);
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001586
Weijie Gao76281942023-07-19 17:17:37 +08001587 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1588 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001589 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1590 else
1591 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
Weijie Gao7d928c32022-09-09 19:59:24 +08001592
Weijie Gao23f17162018-12-20 16:12:53 +08001593 pkt_base += PKTSIZE_ALIGN;
1594 }
1595
1596 mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
1597 virt_to_phys(priv->tx_ring_noc));
1598 mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
1599 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1600
1601 mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
1602 virt_to_phys(priv->rx_ring_noc));
1603 mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
1604 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);
1605
1606 mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
1607}
1608
1609static int mtk_eth_start(struct udevice *dev)
1610{
1611 struct mtk_eth_priv *priv = dev_get_priv(dev);
Weijie Gao76281942023-07-19 17:17:37 +08001612 int i, ret;
Weijie Gao23f17162018-12-20 16:12:53 +08001613
1614 /* Reset FE */
1615 reset_assert(&priv->rst_fe);
1616 udelay(1000);
1617 reset_deassert(&priv->rst_fe);
1618 mdelay(10);
1619
Weijie Gao76281942023-07-19 17:17:37 +08001620 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
1621 MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001622 setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);
1623
Weijie Gao23f17162018-12-20 16:12:53 +08001624 /* Packets forward to PDMA */
1625 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);
1626
Weijie Gao76281942023-07-19 17:17:37 +08001627 for (i = 0; i < priv->soc->gdma_count; i++) {
1628 if (i == priv->gmac_id)
1629 continue;
1630
1631 mtk_gdma_write(priv, i, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
1632 }
1633
1634 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3)) {
Weijie Gao93eb7072023-07-19 17:17:41 +08001635 if (priv->sw == SW_MT7988 && priv->gmac_id == 0) {
1636 mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG,
1637 GDMA_BRIDGE_TO_CPU);
1638 }
1639
Weijie Gao76281942023-07-19 17:17:37 +08001640 mtk_gdma_write(priv, priv->gmac_id, GDMA_EG_CTRL_REG,
1641 GDMA_CPU_BRIDGE_EN);
1642 }
Weijie Gao23f17162018-12-20 16:12:53 +08001643
1644 udelay(500);
1645
1646 mtk_eth_fifo_init(priv);
1647
Weijie Gaoc73d3872023-07-19 17:16:54 +08001648 if (priv->switch_mac_control)
1649 priv->switch_mac_control(priv, true);
1650
Weijie Gao23f17162018-12-20 16:12:53 +08001651 /* Start PHY */
1652 if (priv->sw == SW_NONE) {
1653 ret = mtk_phy_start(priv);
1654 if (ret)
1655 return ret;
1656 }
1657
1658 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
1659 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
1660 udelay(500);
1661
1662 return 0;
1663}
1664
static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	if (priv->switch_mac_control)
		priv->switch_mac_control(priv, false);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}

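/*
 * Program the station MAC address into the GDMA. The six address bytes are
 * split across two registers: the two most significant bytes go into
 * GDMA_MAC_MSB_REG and the remaining four into GDMA_MAC_LSB_REG. As an
 * illustrative example (address not taken from the source), for
 * 00:11:22:33:44:55 this yields macaddr_msb = 0x0011 and
 * macaddr_lsb = 0x22334455.
 */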
static int mtk_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 macaddr_lsb, macaddr_msb;

	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		      ((u32)mac[4] << 8) | (u32)mac[5];

	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);

	return 0;
}

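/*
 * Transmit one frame. The packet is copied into the buffer already attached
 * to the current TX descriptor and flushed from the data cache, the length
 * and last-segment flag are written into txd2 (which also clears DDONE and
 * hands the descriptor back to the DMA), and the CPU owner index is advanced
 * modulo NUM_TX_DESC. If DDONE is still clear on entry, the descriptor is
 * still owned by the DMA and the ring is full.
 */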
static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	struct mtk_tx_dma_v2 *txd;
	void *pkt_base;

	txd = priv->tx_ring_noc + idx * priv->soc->txd_size;

	if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(txd->txd1);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
	else
		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}

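/*
 * Poll for a received frame. The current RX descriptor is complete once the
 * DMA has set DDONE in rxd2; the payload length is then read from the same
 * field and the cache-invalidated buffer is handed to the caller. The
 * descriptor is recycled later in mtk_eth_free_pkt().
 */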
static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	struct mtk_rx_dma_v2 *rxd;
	uchar *pkt_base;
	u32 length;

	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;

	if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
	else
		length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);

	pkt_base = (void *)phys_to_virt(rxd->rxd1);
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}

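/*
 * Return a consumed RX buffer to the hardware: re-arm the descriptor with
 * the full buffer length (which also clears DDONE), publish it through
 * RX_CRX_IDX_REG and advance the software RX index modulo NUM_RX_DESC.
 */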
static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	struct mtk_rx_dma_v2 *rxd;

	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2) ||
	    MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V3))
		rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
	else
		rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);

	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}

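/*
 * Probe: map the frame-engine and GMAC register windows, register the MDIO
 * bus, allocate non-cached memory for the TX/RX descriptor rings, configure
 * the MAC (XGMAC for USXGMII, GMAC otherwise) and then either probe the
 * external PHY or initialize the attached switch via mt753x_switch_init().
 */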
static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	ulong iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* Register MDIO bus */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare TX/RX descriptor rings */
	priv->tx_ring_noc = (void *)
		noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (void *)
		noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII)
		mtk_xmac_init(priv);
	else
		mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return mt753x_switch_init(priv);
}

static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* Unregister and free the MDIO bus */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}

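/*
 * Parse the device tree: pick up the per-SoC match data, map the frame
 * engine, look up the ethsys (and, where required, infracfg) syscon regmaps,
 * the frame-engine reset, the GMAC index and the PHY interface mode, then
 * handle the optional fixed-link, SGMII/USXGMII and switch properties below.
 */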
static int mtk_eth_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
	if (!priv->soc) {
		dev_err(dev, "missing soc compatible data\n");
		return -EINVAL;
	}

	pdata->iobase = (phys_addr_t)dev_remap_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	priv->ethsys_regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(priv->ethsys_regmap))
		return PTR_ERR(priv->ethsys_regmap);

	if (MTK_HAS_CAPS(priv->soc->caps, MTK_INFRA)) {
		/* get corresponding infracfg phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,infracfg",
						 NULL, 0, 0, &args);

		if (ret)
			return ret;

		priv->infra_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->infra_regmap))
			return PTR_ERR(priv->infra_regmap);
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	/* Interface mode is required */
	pdata->phy_interface = dev_read_phy_mode(dev);
	priv->phy_interface = pdata->phy_interface;
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

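	/*
	 * An optional "fixed-link" subnode forces a fixed speed/duplex on the
	 * MAC instead of relying on autonegotiation results; only
	 * 10/100/1000/2500/10000 Mb/s are accepted here.
	 */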
	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000 && priv->speed != SPEED_2500 &&
		    priv->speed != SPEED_10000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

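	/*
	 * SGMII/2500Base-X links need the sgmiisys register range (plus an
	 * optional "pn_swap" polarity swap), while USXGMII additionally
	 * requires the usxgmiisys, xfi_pextp, xfi_pll and toprgu syscon
	 * regmaps.
	 */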
	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    priv->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* get corresponding sgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		regmap = syscon_node_to_regmap(args.node);

		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		priv->sgmii_base = regmap_get_range(regmap, 0);

		if (!priv->sgmii_base) {
			dev_err(dev, "Unable to find sgmii\n");
			return -ENODEV;
		}

		priv->pn_swap = ofnode_read_bool(args.node, "pn_swap");
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_USXGMII) {
		/* get corresponding usxgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,usxgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->usxgmii_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->usxgmii_regmap))
			return PTR_ERR(priv->usxgmii_regmap);

		/* get corresponding xfi_pextp phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pextp",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->xfi_pextp_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->xfi_pextp_regmap))
			return PTR_ERR(priv->xfi_pextp_regmap);

		/* get corresponding xfi_pll phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,xfi_pll",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->xfi_pll_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->xfi_pll_regmap))
			return PTR_ERR(priv->xfi_pll_regmap);

		/* get corresponding toprgu phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,toprgu",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		priv->toprgu_regmap = syscon_node_to_regmap(args.node);
		if (IS_ERR(priv->toprgu_regmap))
			return PTR_ERR(priv->toprgu_regmap);
	}

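	/*
	 * The MAC is either connected to an MT753x/MT7988 switch (selected by
	 * the "mediatek,switch" property, reset through an MCM reset line or
	 * a reset GPIO) or to an ordinary PHY referenced by "phy-handle".
	 */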
	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	priv->switch_mac_control = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->switch_mac_control = mt7530_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 1000;
		} else if (!strcmp(str, "mt7531")) {
			priv->sw = SW_MT7531;
			priv->switch_init = mt7531_setup;
			priv->switch_mac_control = mt7531_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 200;
		} else if (!strcmp(str, "mt7988")) {
			priv->sw = SW_MT7988;
			priv->switch_init = mt7988_setup;
			priv->switch_mac_control = mt7988_mac_control;
			priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
			priv->mt753x_reset_wait_time = 50;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -ENODEV;
		}
	}

	return 0;
}

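/*
 * Per-SoC match data: capability flags, the ANA_RGC3 offset inside the
 * sgmiisys block, the number of GDMAs, the PDMA register base and the TX/RX
 * descriptor sizes used by the code above.
 */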
static const struct mtk_soc_data mt7988_data = {
	.caps = MT7988_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 3,
	.pdma_base = PDMA_V3_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7986_data = {
	.caps = MT7986_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7981_data = {
	.caps = MT7981_CAPS,
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V2_BASE,
	.txd_size = sizeof(struct mtk_tx_dma_v2),
	.rxd_size = sizeof(struct mtk_rx_dma_v2),
};

static const struct mtk_soc_data mt7629_data = {
	.ana_rgc3 = 0x128,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MT7623_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7622_data = {
	.ana_rgc3 = 0x2028,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct mtk_soc_data mt7621_data = {
	.caps = MT7621_CAPS,
	.gdma_count = 2,
	.pdma_base = PDMA_V1_BASE,
	.txd_size = sizeof(struct mtk_tx_dma),
	.rxd_size = sizeof(struct mtk_rx_dma),
};

static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7988-eth", .data = (ulong)&mt7988_data },
	{ .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
	{ .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
	{ .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
	{ .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
	{ .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
	{ .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};