blob: 4c9fb266c7aeb4f3c9f6fe5fe340ddeadaf3278d [file] [log] [blame]
Weijie Gao23f17162018-12-20 16:12:53 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070010#include <cpu_func.h>
Weijie Gao23f17162018-12-20 16:12:53 +080011#include <dm.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060012#include <log.h>
Weijie Gao23f17162018-12-20 16:12:53 +080013#include <malloc.h>
14#include <miiphy.h>
Simon Glass90526e92020-05-10 11:39:56 -060015#include <net.h>
Weijie Gao23f17162018-12-20 16:12:53 +080016#include <regmap.h>
17#include <reset.h>
18#include <syscon.h>
19#include <wait_bit.h>
Simon Glass90526e92020-05-10 11:39:56 -060020#include <asm/cache.h>
Weijie Gao23f17162018-12-20 16:12:53 +080021#include <asm/gpio.h>
22#include <asm/io.h>
Simon Glass336d4612020-02-03 07:36:16 -070023#include <dm/device_compat.h>
Simon Glassc05ed002020-05-10 11:40:11 -060024#include <linux/delay.h>
Weijie Gao23f17162018-12-20 16:12:53 +080025#include <linux/err.h>
26#include <linux/ioport.h>
27#include <linux/mdio.h>
28#include <linux/mii.h>
29
30#include "mtk_eth.h"
31
32#define NUM_TX_DESC 24
33#define NUM_RX_DESC 24
34#define TX_TOTAL_BUF_SIZE (NUM_TX_DESC * PKTSIZE_ALIGN)
35#define RX_TOTAL_BUF_SIZE (NUM_RX_DESC * PKTSIZE_ALIGN)
36#define TOTAL_PKT_BUF_SIZE (TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
37
Landen Chao532de8d2020-02-18 16:49:37 +080038#define MT753X_NUM_PHYS 5
39#define MT753X_NUM_PORTS 7
40#define MT753X_DFL_SMI_ADDR 31
41#define MT753X_SMI_ADDR_MASK 0x1f
Weijie Gao23f17162018-12-20 16:12:53 +080042
Landen Chao532de8d2020-02-18 16:49:37 +080043#define MT753X_PHY_ADDR(base, addr) \
Weijie Gao23f17162018-12-20 16:12:53 +080044 (((base) + (addr)) & 0x1f)
45
46#define GDMA_FWD_TO_CPU \
47 (0x20000000 | \
48 GDM_ICS_EN | \
49 GDM_TCS_EN | \
50 GDM_UCS_EN | \
51 STRP_CRC | \
52 (DP_PDMA << MYMAC_DP_S) | \
53 (DP_PDMA << BC_DP_S) | \
54 (DP_PDMA << MC_DP_S) | \
55 (DP_PDMA << UN_DP_S))
56
57#define GDMA_FWD_DISCARD \
58 (0x20000000 | \
59 GDM_ICS_EN | \
60 GDM_TCS_EN | \
61 GDM_UCS_EN | \
62 STRP_CRC | \
63 (DP_DISCARD << MYMAC_DP_S) | \
64 (DP_DISCARD << BC_DP_S) | \
65 (DP_DISCARD << MC_DP_S) | \
66 (DP_DISCARD << UN_DP_S))
67
/* Type of external switch attached to the MAC, if any */
enum mtk_switch {
	SW_NONE,
	SW_MT7530,
	SW_MT7531
};
73
/* struct mtk_soc_data - This is the structure holding all differences
 *			 among various platforms
 * @caps:	Flags shown the extra capability for the SoC
 * @ana_rgc3:	The offset for register ANA_RGC3 related to
 *		sgmiisys syscon
 * @pdma_base:	Register base of PDMA block
 * @txd_size:	Tx DMA descriptor size.
 * @rxd_size:	Rx DMA descriptor size.
 */
struct mtk_soc_data {
	u32 caps;
	u32 ana_rgc3;
	u32 pdma_base;
	u32 txd_size;
	u32 rxd_size;
};
90
/* struct mtk_eth_priv - per-device driver state */
struct mtk_eth_priv {
	/* DMA-aligned buffer pool holding all Tx and Rx packet buffers */
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	/* Tx/Rx DMA descriptor rings (element size is soc->txd_size/rxd_size) */
	void *tx_ring_noc;
	void *rx_ring_noc;

	/* Current positions in the Rx/Tx descriptor rings */
	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;		/* frame engine register base */
	void __iomem *gmac_base;	/* GMAC register base */
	void __iomem *sgmii_base;	/* SGMII syscon register base */

	struct regmap *ethsys_regmap;	/* ethsys syscon regmap */

	/* MDIO bus and accessors; assigned per switch type in
	 * mtk_mdio_register(): direct SoC access or indirect via switch
	 */
	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	const struct mtk_soc_data *soc;	/* per-SoC constants */
	int gmac_id;			/* index of the GMAC in use */
	int force_mode;			/* non-zero: fixed-link, no autoneg */
	int speed;			/* forced link speed */
	int duplex;			/* forced duplex setting */
	bool pn_swap;			/* swap SGMII TX/RX P/N lanes */

	struct phy_device *phydev;
	int phy_interface;		/* PHY_INTERFACE_MODE_* */
	int phy_addr;

	enum mtk_switch sw;		/* attached switch model, if any */
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt753x_smi_addr;		/* SMI (MDIO) address of the switch */
	u32 mt753x_phy_base;		/* first PHY address of the switch */

	struct gpio_desc rst_gpio;	/* external switch reset GPIO */
	/* mcm: switch integrated in the SoC package, reset via rst_mcm
	 * (presumably "multi-chip module" — confirm against DT binding)
	 */
	int mcm;

	struct reset_ctl rst_fe;	/* frame engine reset control */
	struct reset_ctl rst_mcm;	/* integrated switch reset control */
};
135
136static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
137{
Weijie Gaoe7ad0462022-09-09 19:59:26 +0800138 writel(val, priv->fe_base + priv->soc->pdma_base + reg);
Weijie Gao23f17162018-12-20 16:12:53 +0800139}
140
141static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
142 u32 set)
143{
Weijie Gaoe7ad0462022-09-09 19:59:26 +0800144 clrsetbits_le32(priv->fe_base + priv->soc->pdma_base + reg, clr, set);
Weijie Gao23f17162018-12-20 16:12:53 +0800145}
146
147static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
148 u32 val)
149{
150 u32 gdma_base;
151
152 if (no == 1)
153 gdma_base = GDMA2_BASE;
154 else
155 gdma_base = GDMA1_BASE;
156
157 writel(val, priv->fe_base + gdma_base + reg);
158}
159
/* Read a 32-bit GMAC register */
static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}
164
/* Write a 32-bit GMAC register */
static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}
169
/* Clear then set bits in a GMAC register */
static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}
174
175static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
176 u32 set)
177{
Weijie Gao86062e72022-05-20 11:23:37 +0800178 uint val;
179
180 regmap_read(priv->ethsys_regmap, reg, &val);
181 val &= ~clr;
182 val |= set;
183 regmap_write(priv->ethsys_regmap, reg, val);
Weijie Gao23f17162018-12-20 16:12:53 +0800184}
185
186/* Direct MDIO clause 22/45 access via SoC */
187static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
188 u32 cmd, u32 st)
189{
190 int ret;
191 u32 val;
192
193 val = (st << MDIO_ST_S) |
194 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
195 (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
196 (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
197
198 if (cmd == MDIO_CMD_WRITE)
199 val |= data & MDIO_RW_DATA_M;
200
201 mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
202
203 ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
204 PHY_ACS_ST, 0, 5000, 0);
205 if (ret) {
206 pr_warn("MDIO access timeout\n");
207 return ret;
208 }
209
210 if (cmd == MDIO_CMD_READ) {
211 val = mtk_gmac_read(priv, GMAC_PIAC_REG);
212 return val & MDIO_RW_DATA_M;
213 }
214
215 return 0;
216}
217
/* Direct MDIO clause 22 read via SoC.
 * Returns the register value (>= 0) or a negative error.
 */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}
223
/* Direct MDIO clause 22 write via SoC.
 * Returns 0 on success or a negative error.
 */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}
229
/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	/* Clause 45 address cycle: latch the target register first */
	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	/* Then issue the read for the latched register */
	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}
242
/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	/* Clause 45 address cycle: latch the target register first */
	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	/* Then write the data to the latched register */
	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}
256
/* Indirect MDIO clause 45 read via MII registers.
 *
 * Emulates a clause 45 read using clause 22 accesses to the PHY's MMD
 * access control / address-data registers: select the MMD device and
 * latch the register address, then switch to data mode and read it back.
 *
 * Returns the register value (>= 0) or a negative error.
 */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	/* Address mode: select MMD device */
	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	/* Latch the target register address */
	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	/* Data mode: subsequent access hits the latched register */
	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}
281
/* Indirect MDIO clause 45 write via MII registers.
 *
 * Same emulation sequence as mtk_mmd_ind_read(), but the final data-mode
 * access writes @val instead of reading.
 *
 * Returns 0 on success or a negative error.
 */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	/* Address mode: select MMD device */
	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	/* Latch the target register address */
	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	/* Data mode: subsequent access hits the latched register */
	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}
306
Landen Chao532de8d2020-02-18 16:49:37 +0800307/*
308 * MT7530 Internal Register Address Bits
309 * -------------------------------------------------------------------
310 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
311 * |----------------------------------------|---------------|--------|
312 * | Page Address | Reg Address | Unused |
313 * -------------------------------------------------------------------
314 */
315
/* Read a 32-bit MT753x switch register over SMI.
 *
 * A 32-bit register access is split into three clause 22 transactions
 * using the page/register layout documented above: write the page
 * (reg >> 6), then read the low and high 16-bit halves.
 *
 * @data may be NULL to perform the access without returning the value.
 * Returns 0 on success or a negative error.
 */
static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}
340
/* Write a 32-bit MT753x switch register over SMI.
 *
 * Mirror of mt753x_reg_read(): write the page address, then the low and
 * high 16-bit halves of @data.
 *
 * Returns 0 on success or a negative error.
 */
static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
}
359
360static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
361 u32 set)
362{
363 u32 val;
364
365 mt753x_reg_read(priv, reg, &val);
366 val &= ~clr;
367 val |= set;
368 mt753x_reg_write(priv, reg, val);
369}
370
/* Indirect MDIO clause 22/45 access.
 *
 * Performs an MDIO transaction through the MT7531's internal MDIO master
 * (PHY_IAC register), which is itself reached via mt753x_reg_read/write.
 * Polls for completion with a 100ms timeout.
 *
 * Returns the 16-bit data for read commands, 0 for writes/address cycles,
 * or -ETIMEDOUT.
 */
static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
			 u32 cmd, u32 st)
{
	ulong timeout;
	u32 val, timeout_ms;
	int ret = 0;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	/* Data is carried by writes and by the clause 45 address cycle */
	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	/* Kick off the access; PHY_ACS_ST doubles as the busy flag */
	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);

	timeout_ms = 100;
	timeout = get_timer(0);
	while (1) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);

		if ((val & PHY_ACS_ST) == 0)
			break;

		if (get_timer(timeout) > timeout_ms)
			return -ETIMEDOUT;
	}

	/* Fetch the result for both clause 22 and clause 45 reads */
	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
		ret = val & MDIO_RW_DATA_M;
	}

	return ret;
}
408
/* Clause 22 read from an MT7531 internal PHY (index @phy, 0-based).
 * Returns the register value (>= 0), -EINVAL for an invalid PHY index,
 * or a negative error.
 */
static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
			     MDIO_ST_C22);
}
421
/* Clause 22 write to an MT7531 internal PHY (index @phy, 0-based).
 * Returns 0 on success, -EINVAL for an invalid PHY index, or a negative
 * error.
 */
static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
				u16 val)
{
	u8 phy_addr;

	if (phy >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);

	return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
			     MDIO_ST_C22);
}
435
/* Clause 45 read from an MT7531 internal PHY (index @addr, 0-based).
 *
 * NOTE(review): unlike its sibling accessors this function is not
 * static — confirm whether external linkage is actually needed.
 *
 * Returns the register value (>= 0), -EINVAL for an invalid PHY index,
 * or a negative error.
 */
int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	u8 phy_addr;
	int ret;

	if (addr >= MT753X_NUM_PHYS)
		return -EINVAL;

	phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);

	/* Clause 45 address cycle, then the actual read */
	ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
			    MDIO_ST_C45);
	if (ret)
		return ret;

	return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
			     MDIO_ST_C45);
}
454
455static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
456 u16 reg, u16 val)
457{
458 u8 phy_addr;
459 int ret;
460
461 if (addr >= MT753X_NUM_PHYS)
462 return 0;
463
464 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
465
466 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
467 MDIO_ST_C45);
468 if (ret)
469 return ret;
470
471 return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
472 MDIO_ST_C45);
473}
474
Weijie Gao23f17162018-12-20 16:12:53 +0800475static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
476{
477 struct mtk_eth_priv *priv = bus->priv;
478
479 if (devad < 0)
480 return priv->mii_read(priv, addr, reg);
481 else
482 return priv->mmd_read(priv, addr, devad, reg);
483}
484
485static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
486 u16 val)
487{
488 struct mtk_eth_priv *priv = bus->priv;
489
490 if (devad < 0)
491 return priv->mii_write(priv, addr, reg, val);
492 else
493 return priv->mmd_write(priv, addr, devad, reg, val);
494}
495
496static int mtk_mdio_register(struct udevice *dev)
497{
498 struct mtk_eth_priv *priv = dev_get_priv(dev);
499 struct mii_dev *mdio_bus = mdio_alloc();
500 int ret;
501
502 if (!mdio_bus)
503 return -ENOMEM;
504
505 /* Assign MDIO access APIs according to the switch/phy */
506 switch (priv->sw) {
507 case SW_MT7530:
508 priv->mii_read = mtk_mii_read;
509 priv->mii_write = mtk_mii_write;
510 priv->mmd_read = mtk_mmd_ind_read;
511 priv->mmd_write = mtk_mmd_ind_write;
512 break;
Landen Chao532de8d2020-02-18 16:49:37 +0800513 case SW_MT7531:
514 priv->mii_read = mt7531_mii_ind_read;
515 priv->mii_write = mt7531_mii_ind_write;
516 priv->mmd_read = mt7531_mmd_ind_read;
517 priv->mmd_write = mt7531_mmd_ind_write;
518 break;
Weijie Gao23f17162018-12-20 16:12:53 +0800519 default:
520 priv->mii_read = mtk_mii_read;
521 priv->mii_write = mtk_mii_write;
522 priv->mmd_read = mtk_mmd_read;
523 priv->mmd_write = mtk_mmd_write;
524 }
525
526 mdio_bus->read = mtk_mdio_read;
527 mdio_bus->write = mtk_mdio_write;
528 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
529
530 mdio_bus->priv = (void *)priv;
531
532 ret = mdio_register(mdio_bus);
533
534 if (ret)
535 return ret;
536
537 priv->mdio_bus = mdio_bus;
538
539 return 0;
540}
541
/* Read an MT753x switch core register, reachable as MMD device 0x1f of
 * the switch's first internal PHY.
 */
static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
{
	u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);

	return priv->mmd_read(priv, phy_addr, 0x1f, reg);
}
548
Landen Chao532de8d2020-02-18 16:49:37 +0800549static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
Weijie Gao23f17162018-12-20 16:12:53 +0800550{
Landen Chao532de8d2020-02-18 16:49:37 +0800551 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
Weijie Gao23f17162018-12-20 16:12:53 +0800552
Landen Chao532de8d2020-02-18 16:49:37 +0800553 priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
Weijie Gao23f17162018-12-20 16:12:53 +0800554}
555
/* Configure the MT7530 core/TRGMII clocks and PLLs for the given xMII
 * @mode. Only RGMII is supported; other modes return -EINVAL.
 *
 * The register sequence (disable clock/PLL, reprogram, re-enable) is
 * order-sensitive and follows the vendor init flow — do not reorder.
 */
static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}
615
/* One-time MT7530 switch bring-up:
 * discover the internal PHY base address from the hardware trap, power
 * the PHYs down, soft-reset the switch, configure port 6 as a forced
 * 1G/FD RGMII CPU port (port 5 stays down), set up the core PLLs and
 * TRGMII pad driving, then power the PHYs back up.
 *
 * Returns 0 (the MDIO/register helper errors are not propagated here).
 */
static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val, txdrv;
	int i;

	if (!MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
		/* Select 250MHz clk for RGMII mode */
		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
			       ETHSYS_TRGMII_CLK_SEL362_5, 0);

		txdrv = 8;
	} else {
		txdrv = 4;
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address from the trapped SMI address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt753x_reg_write(priv, PMCR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (txdrv << TD_DM_DRVP_S) |
				 (txdrv << TD_DM_DRVN_S));

	/* Set TRGMII Rx delay taps */
	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}
704
/* Program the MT7531 core PLL to 500MHz from a 25MHz XTAL, following the
 * vendor-documented step sequence (order-sensitive — do not reorder).
 *
 * Note: the @mcm parameter is currently unused by this sequence.
 */
static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1 : Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, support XTAL 25Mhz only */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* add enable 325M clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* add enable 250SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/*Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}
750
/* Put MT7531 port 5 or 6 into forced 2.5G SGMII mode (no autoneg) and
 * release the PHYA power-down.
 *
 * Returns 0 on success or -EINVAL for a non-SGMII-capable port.
 */
static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed(2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}
776
/* Configure MT7531 port 5 for RGMII: enable the pad clock, select RGMII
 * mode, and use default (no-change) clock skew/delay settings.
 *
 * Returns 0 on success or -EINVAL if @port is not 5 (the only port with
 * RGMII pads).
 */
static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
{
	u32 val;

	if (port != 5) {
		printf("error: RGMII mode is not available for port %d\n",
		       port);
		return -EINVAL;
	}

	mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
	val |= GP_CLK_EN;
	val &= ~GP_MODE_M;
	val |= GP_MODE_RGMII << GP_MODE_S;
	val |= TXCLK_NO_REVERSE;
	val |= RXCLK_NO_DELAY;
	val &= ~CLK_SKEW_IN_M;
	val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
	val &= ~CLK_SKEW_OUT_M;
	val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
	mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);

	return 0;
}
801
/* Apply tuning to every MT7531 internal PHY: HW auto downshift,
 * link-down power saving, and Tx power saving.
 *
 * Note: the PHYs are addressed by raw index @i here, not via
 * MT753X_PHY_ADDR() — the mt7531_*_ind accessors already translate.
 */
static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift */
		/* reg 0x1f presumably selects the extension page — confirm
		 * against the PHY datasheet
		 */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		/* Select Tx power-saving mode via MMD device 0x1e */
		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}
825
/* One-time MT7531 switch bring-up:
 * derive the internal PHY base from the SMI address, power PHYs down,
 * soft-reset, set up the core PLL, configure port 5/6 for RGMII/SGMII
 * according to priv->phy_interface and the dual-SGMII pad strap, force
 * both CPU-facing ports to 1G/FD with flow control, power PHYs back up
 * and apply PHY tuning.
 *
 * Returns 0 (the MDIO/register helper errors are not propagated here).
 */
static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	/* Internal PHYs sit right after the switch's own SMI address */
	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	/* Check the dual-SGMII pad strap to see how port 5 is wired */
	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* port5 support either RGMII or SGMII, port6 only support SGMII. */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	/* Forced 1G/FD with Tx/Rx flow control on both CPU-facing ports */
	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
	mt753x_reg_write(priv, PMCR_REG(6), pmcr);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}
906
/* Reset and initialize the attached MT753x switch.
 *
 * Performs a global hardware reset (via the MCM reset controller or the
 * reset GPIO), runs the model-specific init hook, then isolates the
 * ports: each user port may only forward to port 6 (matrix 0x40, bit 6),
 * while CPU port 6 may forward to ports 0-5 (matrix 0x3f), and every
 * port is set to untagged user mode.
 *
 * Returns 0 on success or the error from the model-specific init.
 */
int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	/* Model-specific setup: mt7530_setup() or mt7531_setup() */
	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}
947
948static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
949{
950 u16 lcl_adv = 0, rmt_adv = 0;
951 u8 flowctrl;
952 u32 mcr;
953
Landen Chao532de8d2020-02-18 16:49:37 +0800954 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
Weijie Gao23f17162018-12-20 16:12:53 +0800955 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
956 MAC_MODE | FORCE_MODE |
957 MAC_TX_EN | MAC_RX_EN |
958 BKOFF_EN | BACKPR_EN;
959
960 switch (priv->phydev->speed) {
961 case SPEED_10:
962 mcr |= (SPEED_10M << FORCE_SPD_S);
963 break;
964 case SPEED_100:
965 mcr |= (SPEED_100M << FORCE_SPD_S);
966 break;
967 case SPEED_1000:
968 mcr |= (SPEED_1000M << FORCE_SPD_S);
969 break;
970 };
971
972 if (priv->phydev->link)
973 mcr |= FORCE_LINK;
974
975 if (priv->phydev->duplex) {
976 mcr |= FORCE_DPX;
977
978 if (priv->phydev->pause)
979 rmt_adv = LPA_PAUSE_CAP;
980 if (priv->phydev->asym_pause)
981 rmt_adv |= LPA_PAUSE_ASYM;
982
983 if (priv->phydev->advertising & ADVERTISED_Pause)
984 lcl_adv |= ADVERTISE_PAUSE_CAP;
985 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
986 lcl_adv |= ADVERTISE_PAUSE_ASYM;
987
988 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
989
990 if (flowctrl & FLOW_CTRL_TX)
991 mcr |= FORCE_TX_FC;
992 if (flowctrl & FLOW_CTRL_RX)
993 mcr |= FORCE_RX_FC;
994
995 debug("rx pause %s, tx pause %s\n",
996 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
997 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
998 }
999
1000 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1001}
1002
1003static int mtk_phy_start(struct mtk_eth_priv *priv)
1004{
1005 struct phy_device *phydev = priv->phydev;
1006 int ret;
1007
1008 ret = phy_startup(phydev);
1009
1010 if (ret) {
1011 debug("Could not initialize PHY %s\n", phydev->dev->name);
1012 return ret;
1013 }
1014
1015 if (!phydev->link) {
1016 debug("%s: link down.\n", phydev->dev->name);
1017 return 0;
1018 }
1019
1020 mtk_phy_link_adjust(priv);
1021
1022 debug("Speed: %d, %s duplex%s\n", phydev->speed,
1023 (phydev->duplex) ? "full" : "half",
1024 (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
1025
1026 return 0;
1027}
1028
1029static int mtk_phy_probe(struct udevice *dev)
1030{
1031 struct mtk_eth_priv *priv = dev_get_priv(dev);
1032 struct phy_device *phydev;
1033
1034 phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
1035 priv->phy_interface);
1036 if (!phydev)
1037 return -ENODEV;
1038
1039 phydev->supported &= PHY_GBIT_FEATURES;
1040 phydev->advertising = phydev->supported;
1041
1042 priv->phydev = phydev;
1043 phy_config(phydev);
1044
1045 return 0;
1046}
1047
MarkLeeb4ef49a2020-01-21 19:31:57 +08001048static void mtk_sgmii_init(struct mtk_eth_priv *priv)
1049{
1050 /* Set SGMII GEN2 speed(2.5G) */
Weijie Gao62596722022-09-09 19:59:21 +08001051 setbits_le32(priv->sgmii_base + priv->soc->ana_rgc3,
1052 SGMSYS_SPEED_2500);
MarkLeeb4ef49a2020-01-21 19:31:57 +08001053
1054 /* Disable SGMII AN */
1055 clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
1056 SGMII_AN_ENABLE, 0);
1057
1058 /* SGMII force mode setting */
1059 writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);
1060
Weijie Gao29a48bf2022-09-09 19:59:28 +08001061 /* SGMII PN SWAP setting */
1062 if (priv->pn_swap) {
1063 setbits_le32(priv->sgmii_base + SGMSYS_QPHY_WRAP_CTRL,
1064 SGMII_PN_SWAP_TX_RX);
1065 }
1066
MarkLeeb4ef49a2020-01-21 19:31:57 +08001067 /* Release PHYA power down state */
1068 clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
1069 SGMII_PHYA_PWD, 0);
1070}
1071
Weijie Gao23f17162018-12-20 16:12:53 +08001072static void mtk_mac_init(struct mtk_eth_priv *priv)
1073{
1074 int i, ge_mode = 0;
1075 u32 mcr;
1076
1077 switch (priv->phy_interface) {
1078 case PHY_INTERFACE_MODE_RGMII_RXID:
1079 case PHY_INTERFACE_MODE_RGMII:
MarkLeeb4ef49a2020-01-21 19:31:57 +08001080 ge_mode = GE_MODE_RGMII;
1081 break;
Weijie Gao23f17162018-12-20 16:12:53 +08001082 case PHY_INTERFACE_MODE_SGMII:
1083 ge_mode = GE_MODE_RGMII;
MarkLeeb4ef49a2020-01-21 19:31:57 +08001084 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
1085 SYSCFG0_SGMII_SEL(priv->gmac_id));
1086 mtk_sgmii_init(priv);
Weijie Gao23f17162018-12-20 16:12:53 +08001087 break;
1088 case PHY_INTERFACE_MODE_MII:
1089 case PHY_INTERFACE_MODE_GMII:
1090 ge_mode = GE_MODE_MII;
1091 break;
1092 case PHY_INTERFACE_MODE_RMII:
1093 ge_mode = GE_MODE_RMII;
1094 break;
1095 default:
1096 break;
1097 }
1098
1099 /* set the gmac to the right mode */
1100 mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
1101 SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
1102 ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));
1103
1104 if (priv->force_mode) {
Landen Chao532de8d2020-02-18 16:49:37 +08001105 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
Weijie Gao23f17162018-12-20 16:12:53 +08001106 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1107 MAC_MODE | FORCE_MODE |
1108 MAC_TX_EN | MAC_RX_EN |
1109 BKOFF_EN | BACKPR_EN |
1110 FORCE_LINK;
1111
1112 switch (priv->speed) {
1113 case SPEED_10:
1114 mcr |= SPEED_10M << FORCE_SPD_S;
1115 break;
1116 case SPEED_100:
1117 mcr |= SPEED_100M << FORCE_SPD_S;
1118 break;
1119 case SPEED_1000:
1120 mcr |= SPEED_1000M << FORCE_SPD_S;
1121 break;
1122 }
1123
1124 if (priv->duplex)
1125 mcr |= FORCE_DPX;
1126
1127 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1128 }
1129
Weijie Gao62596722022-09-09 19:59:21 +08001130 if (MTK_HAS_CAPS(priv->soc->caps, MTK_GMAC1_TRGMII) &&
1131 !MTK_HAS_CAPS(priv->soc->caps, MTK_TRGMII_MT7621_CLK)) {
Weijie Gao23f17162018-12-20 16:12:53 +08001132 /* Lower Tx Driving for TRGMII path */
1133 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
1134 mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
1135 (8 << TD_DM_DRVP_S) |
1136 (8 << TD_DM_DRVN_S));
1137
1138 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
1139 RX_RST | RXC_DQSISEL);
1140 mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
1141 }
1142}
1143
/*
 * mtk_eth_fifo_init() - Set up the PDMA TX/RX descriptor rings
 *
 * Clears the rings and the shared packet pool, gives every TX descriptor
 * a CPU-owned (DDONE) state and every RX descriptor a full-size buffer,
 * then programs ring bases/sizes/indices into the PDMA and resets both
 * DMA indices. The rings live in non-cached memory; only the packet
 * buffers need explicit cache maintenance.
 */
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	struct mtk_tx_dma_v2 *txd;
	struct mtk_rx_dma_v2 *rxd;
	int i;

	/* Stop/clear PDMA config bits before touching the rings */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * priv->soc->txd_size);
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * priv->soc->rxd_size);
	memset(priv->pkt_pool, 0xff, TOTAL_PKT_BUF_SIZE);

	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		/* descriptor stride depends on the SoC's descriptor layout */
		txd = priv->tx_ring_noc + i * priv->soc->txd_size;

		txd->txd1 = virt_to_phys(pkt_base);
		/* DDONE: CPU owns the descriptor; LS0: single-segment frame */
		txd->txd2 = PDMA_TXD2_DDONE | PDMA_TXD2_LS0;

		/* forward port field moved from txd4 (v1) to txd5 (v2) */
		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
			txd->txd5 = PDMA_V2_TXD5_FPORT_SET(priv->gmac_id + 1);
		else
			txd->txd4 = PDMA_V1_TXD4_FPORT_SET(priv->gmac_id + 1);

		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		rxd = priv->rx_ring_noc + i * priv->soc->rxd_size;

		rxd->rxd1 = virt_to_phys(pkt_base);

		/* advertise the full buffer length to the DMA engine */
		if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
			rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
		else
			rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);

		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	/* CPU RX index starts one behind so all descriptors are DMA-owned */
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}
1203
/*
 * mtk_eth_start() - .start eth_ops hook: bring the interface up
 *
 * Resets the frame engine, routes received frames of our GMAC to the
 * PDMA (and discards those of the other GMAC), rebuilds the descriptor
 * rings, starts the PHY when no switch is attached, and finally enables
 * the TX/RX DMA engines.
 *
 * Return: 0 on success, negative error from PHY startup otherwise.
 */
static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* v2 hardware must be switched to the v2 descriptor format */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
		setbits_le32(priv->fe_base + FE_GLO_MISC_REG, PDMA_VER_V2);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	/* drop traffic arriving on the unused GMAC */
	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	/* Enable TX/RX DMA and TX writeback of the DDONE bit */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}
1243
1244static void mtk_eth_stop(struct udevice *dev)
1245{
1246 struct mtk_eth_priv *priv = dev_get_priv(dev);
1247
1248 mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
1249 TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
1250 udelay(500);
1251
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001252 wait_for_bit_le32(priv->fe_base + priv->soc->pdma_base + PDMA_GLO_CFG_REG,
Weijie Gao23f17162018-12-20 16:12:53 +08001253 RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
1254}
1255
1256static int mtk_eth_write_hwaddr(struct udevice *dev)
1257{
Simon Glassc69cda22020-12-03 16:55:20 -07001258 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001259 struct mtk_eth_priv *priv = dev_get_priv(dev);
1260 unsigned char *mac = pdata->enetaddr;
1261 u32 macaddr_lsb, macaddr_msb;
1262
1263 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1264 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1265 ((u32)mac[4] << 8) | (u32)mac[5];
1266
1267 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1268 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1269
1270 return 0;
1271}
1272
/*
 * mtk_eth_send() - .send eth_ops hook: queue one frame on the TX ring
 *
 * Copies @packet into the DMA buffer of the current TX descriptor,
 * flushes the cache, hands the descriptor to the hardware (clearing
 * DDONE via the txd2 rewrite) and advances the CPU TX index register.
 *
 * Return: 0 on success, -EPERM if the ring has no CPU-owned descriptor.
 */
static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	struct mtk_tx_dma_v2 *txd;
	void *pkt_base;

	txd = priv->tx_ring_noc + idx * priv->soc->txd_size;

	/* DDONE set means the CPU owns this descriptor */
	if (!(txd->txd2 & PDMA_TXD2_DDONE)) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(txd->txd1);
	memcpy(pkt_base, packet, length);
	/* make the frame visible to the DMA engine before ownership flips */
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	/* writing txd2 without DDONE passes the descriptor to hardware */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V2_TXD2_SDL0_SET(length);
	else
		txd->txd2 = PDMA_TXD2_LS0 | PDMA_V1_TXD2_SDL0_SET(length);

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}
1302
/*
 * mtk_eth_recv() - .recv eth_ops hook: fetch one received frame
 *
 * Checks whether the current RX descriptor has been completed by the
 * hardware (DDONE set), invalidates the cache over the frame and hands
 * the in-ring buffer back via @packetp. The descriptor is recycled
 * later by mtk_eth_free_pkt(); the index is not advanced here.
 *
 * Return: frame length in bytes, or -EAGAIN if no frame is pending.
 */
static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	struct mtk_rx_dma_v2 *rxd;
	uchar *pkt_base;
	u32 length;

	rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;

	/* DDONE clear means the hardware still owns this descriptor */
	if (!(rxd->rxd2 & PDMA_RXD2_DDONE)) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	/* frame length field layout differs between v1 and v2 descriptors */
	if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
		length = PDMA_V2_RXD2_PLEN0_GET(rxd->rxd2);
	else
		length = PDMA_V1_RXD2_PLEN0_GET(rxd->rxd2);

	pkt_base = (void *)phys_to_virt(rxd->rxd1);
	/* discard stale cache lines before the CPU reads the frame */
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}
1332
1333static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1334{
1335 struct mtk_eth_priv *priv = dev_get_priv(dev);
1336 u32 idx = priv->rx_dma_owner_idx0;
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001337 struct mtk_rx_dma_v2 *rxd;
Weijie Gao23f17162018-12-20 16:12:53 +08001338
Weijie Gao7d928c32022-09-09 19:59:24 +08001339 rxd = priv->rx_ring_noc + idx * priv->soc->rxd_size;
1340
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001341 if (MTK_HAS_CAPS(priv->soc->caps, MTK_NETSYS_V2))
1342 rxd->rxd2 = PDMA_V2_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
1343 else
1344 rxd->rxd2 = PDMA_V1_RXD2_PLEN0_SET(PKTSIZE_ALIGN);
Weijie Gao23f17162018-12-20 16:12:53 +08001345
1346 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1347 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1348
1349 return 0;
1350}
1351
/*
 * mtk_eth_probe() - Driver probe: map registers, allocate rings, init MAC
 *
 * Sets up the frame-engine and GMAC register bases from the DT iobase,
 * registers the MDIO bus, carves the TX/RX descriptor rings out of
 * non-cached memory, programs the MAC interface mode, then probes either
 * the PHY or the mt753x switch depending on the DT configuration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	ulong iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare for tx/rx rings (non-cached so no sync is needed) */
	priv->tx_ring_noc = (void *)
		noncached_alloc(priv->soc->txd_size * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (void *)
		noncached_alloc(priv->soc->rxd_size * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return mt753x_switch_init(priv);
}
1388
/*
 * mtk_eth_remove() - Driver remove: release the MDIO bus and halt DMA
 *
 * Always returns 0.
 */
static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}
1402
Simon Glassd1998a92020-12-03 16:55:21 -07001403static int mtk_eth_of_to_plat(struct udevice *dev)
Weijie Gao23f17162018-12-20 16:12:53 +08001404{
Simon Glassc69cda22020-12-03 16:55:20 -07001405 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001406 struct mtk_eth_priv *priv = dev_get_priv(dev);
1407 struct ofnode_phandle_args args;
1408 struct regmap *regmap;
1409 const char *str;
1410 ofnode subnode;
1411 int ret;
1412
Weijie Gao62596722022-09-09 19:59:21 +08001413 priv->soc = (const struct mtk_soc_data *)dev_get_driver_data(dev);
1414 if (!priv->soc) {
1415 dev_err(dev, "missing soc compatible data\n");
1416 return -EINVAL;
1417 }
Weijie Gao23f17162018-12-20 16:12:53 +08001418
Weijie Gao528e4832022-05-20 11:23:31 +08001419 pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001420
1421 /* get corresponding ethsys phandle */
1422 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1423 &args);
1424 if (ret)
1425 return ret;
1426
Weijie Gao86062e72022-05-20 11:23:37 +08001427 priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1428 if (IS_ERR(priv->ethsys_regmap))
1429 return PTR_ERR(priv->ethsys_regmap);
Weijie Gao23f17162018-12-20 16:12:53 +08001430
1431 /* Reset controllers */
1432 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1433 if (ret) {
1434 printf("error: Unable to get reset ctrl for frame engine\n");
1435 return ret;
1436 }
1437
1438 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1439
1440 /* Interface mode is required */
Marek Behún123ca112022-04-07 00:33:01 +02001441 pdata->phy_interface = dev_read_phy_mode(dev);
1442 priv->phy_interface = pdata->phy_interface;
Marek Behúnffb0f6f2022-04-07 00:33:03 +02001443 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Weijie Gao23f17162018-12-20 16:12:53 +08001444 printf("error: phy-mode is not set\n");
1445 return -EINVAL;
1446 }
1447
1448 /* Force mode or autoneg */
1449 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
1450 if (ofnode_valid(subnode)) {
1451 priv->force_mode = 1;
1452 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
1453 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
1454
1455 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
1456 priv->speed != SPEED_1000) {
1457 printf("error: no valid speed set in fixed-link\n");
1458 return -EINVAL;
1459 }
1460 }
1461
MarkLeeb4ef49a2020-01-21 19:31:57 +08001462 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1463 /* get corresponding sgmii phandle */
1464 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
1465 NULL, 0, 0, &args);
1466 if (ret)
1467 return ret;
1468
1469 regmap = syscon_node_to_regmap(args.node);
1470
1471 if (IS_ERR(regmap))
1472 return PTR_ERR(regmap);
1473
1474 priv->sgmii_base = regmap_get_range(regmap, 0);
1475
1476 if (!priv->sgmii_base) {
1477 dev_err(dev, "Unable to find sgmii\n");
1478 return -ENODEV;
1479 }
Weijie Gao29a48bf2022-09-09 19:59:28 +08001480
1481 priv->pn_swap = ofnode_read_bool(args.node, "pn_swap");
MarkLeeb4ef49a2020-01-21 19:31:57 +08001482 }
1483
Weijie Gao23f17162018-12-20 16:12:53 +08001484 /* check for switch first, otherwise phy will be used */
1485 priv->sw = SW_NONE;
1486 priv->switch_init = NULL;
1487 str = dev_read_string(dev, "mediatek,switch");
1488
1489 if (str) {
1490 if (!strcmp(str, "mt7530")) {
1491 priv->sw = SW_MT7530;
1492 priv->switch_init = mt7530_setup;
Landen Chao532de8d2020-02-18 16:49:37 +08001493 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
1494 } else if (!strcmp(str, "mt7531")) {
1495 priv->sw = SW_MT7531;
1496 priv->switch_init = mt7531_setup;
1497 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
Weijie Gao23f17162018-12-20 16:12:53 +08001498 } else {
1499 printf("error: unsupported switch\n");
1500 return -EINVAL;
1501 }
1502
1503 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
1504 if (priv->mcm) {
1505 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
1506 if (ret) {
1507 printf("error: no reset ctrl for mcm\n");
1508 return ret;
1509 }
1510 } else {
1511 gpio_request_by_name(dev, "reset-gpios", 0,
1512 &priv->rst_gpio, GPIOD_IS_OUT);
1513 }
1514 } else {
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001515 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
1516 0, &args);
1517 if (ret) {
Weijie Gao23f17162018-12-20 16:12:53 +08001518 printf("error: phy-handle is not specified\n");
1519 return ret;
1520 }
1521
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001522 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
Weijie Gao23f17162018-12-20 16:12:53 +08001523 if (priv->phy_addr < 0) {
1524 printf("error: phy address is not specified\n");
1525 return ret;
1526 }
1527 }
1528
1529 return 0;
1530}
1531
Weijie Gao29a48bf2022-09-09 19:59:28 +08001532static const struct mtk_soc_data mt7986_data = {
1533 .caps = MT7986_CAPS,
1534 .ana_rgc3 = 0x128,
1535 .pdma_base = PDMA_V2_BASE,
1536 .txd_size = sizeof(struct mtk_tx_dma_v2),
1537 .rxd_size = sizeof(struct mtk_rx_dma_v2),
1538};
1539
1540static const struct mtk_soc_data mt7981_data = {
1541 .caps = MT7986_CAPS,
1542 .ana_rgc3 = 0x128,
1543 .pdma_base = PDMA_V2_BASE,
1544 .txd_size = sizeof(struct mtk_tx_dma_v2),
1545 .rxd_size = sizeof(struct mtk_rx_dma_v2),
1546};
1547
Weijie Gao62596722022-09-09 19:59:21 +08001548static const struct mtk_soc_data mt7629_data = {
1549 .ana_rgc3 = 0x128,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001550 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001551 .txd_size = sizeof(struct mtk_tx_dma),
1552 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001553};
1554
1555static const struct mtk_soc_data mt7623_data = {
1556 .caps = MT7623_CAPS,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001557 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001558 .txd_size = sizeof(struct mtk_tx_dma),
1559 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001560};
1561
1562static const struct mtk_soc_data mt7622_data = {
1563 .ana_rgc3 = 0x2028,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001564 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001565 .txd_size = sizeof(struct mtk_tx_dma),
1566 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001567};
1568
1569static const struct mtk_soc_data mt7621_data = {
1570 .caps = MT7621_CAPS,
Weijie Gaoe7ad0462022-09-09 19:59:26 +08001571 .pdma_base = PDMA_V1_BASE,
Weijie Gao7d928c32022-09-09 19:59:24 +08001572 .txd_size = sizeof(struct mtk_tx_dma),
1573 .rxd_size = sizeof(struct mtk_rx_dma),
Weijie Gao62596722022-09-09 19:59:21 +08001574};
1575
Weijie Gao23f17162018-12-20 16:12:53 +08001576static const struct udevice_id mtk_eth_ids[] = {
Weijie Gao29a48bf2022-09-09 19:59:28 +08001577 { .compatible = "mediatek,mt7986-eth", .data = (ulong)&mt7986_data },
1578 { .compatible = "mediatek,mt7981-eth", .data = (ulong)&mt7981_data },
Weijie Gao62596722022-09-09 19:59:21 +08001579 { .compatible = "mediatek,mt7629-eth", .data = (ulong)&mt7629_data },
1580 { .compatible = "mediatek,mt7623-eth", .data = (ulong)&mt7623_data },
1581 { .compatible = "mediatek,mt7622-eth", .data = (ulong)&mt7622_data },
1582 { .compatible = "mediatek,mt7621-eth", .data = (ulong)&mt7621_data },
Weijie Gao23f17162018-12-20 16:12:53 +08001583 {}
1584};
1585
/* UCLASS_ETH operation table */
static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};
1594
/*
 * Driver registration. DM_FLAG_ALLOC_PRIV_DMA requests DMA-capable
 * alignment for the private data allocation.
 */
U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};