blob: 4fe7ee0d36ad1afe825713ce0e5c3540dafa899e [file] [log] [blame]
Weijie Gao23f17162018-12-20 16:12:53 +08001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 MediaTek Inc.
4 *
5 * Author: Weijie Gao <weijie.gao@mediatek.com>
6 * Author: Mark Lee <mark-mc.lee@mediatek.com>
7 */
8
9#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070010#include <cpu_func.h>
Weijie Gao23f17162018-12-20 16:12:53 +080011#include <dm.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060012#include <log.h>
Weijie Gao23f17162018-12-20 16:12:53 +080013#include <malloc.h>
14#include <miiphy.h>
Simon Glass90526e92020-05-10 11:39:56 -060015#include <net.h>
Weijie Gao23f17162018-12-20 16:12:53 +080016#include <regmap.h>
17#include <reset.h>
18#include <syscon.h>
19#include <wait_bit.h>
Simon Glass90526e92020-05-10 11:39:56 -060020#include <asm/cache.h>
Weijie Gao23f17162018-12-20 16:12:53 +080021#include <asm/gpio.h>
22#include <asm/io.h>
Simon Glass336d4612020-02-03 07:36:16 -070023#include <dm/device_compat.h>
Simon Glassc05ed002020-05-10 11:40:11 -060024#include <linux/delay.h>
Weijie Gao23f17162018-12-20 16:12:53 +080025#include <linux/err.h>
26#include <linux/ioport.h>
27#include <linux/mdio.h>
28#include <linux/mii.h>
29
30#include "mtk_eth.h"
31
32#define NUM_TX_DESC 24
33#define NUM_RX_DESC 24
34#define TX_TOTAL_BUF_SIZE (NUM_TX_DESC * PKTSIZE_ALIGN)
35#define RX_TOTAL_BUF_SIZE (NUM_RX_DESC * PKTSIZE_ALIGN)
36#define TOTAL_PKT_BUF_SIZE (TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)
37
Landen Chao532de8d2020-02-18 16:49:37 +080038#define MT753X_NUM_PHYS 5
39#define MT753X_NUM_PORTS 7
40#define MT753X_DFL_SMI_ADDR 31
41#define MT753X_SMI_ADDR_MASK 0x1f
Weijie Gao23f17162018-12-20 16:12:53 +080042
Landen Chao532de8d2020-02-18 16:49:37 +080043#define MT753X_PHY_ADDR(base, addr) \
Weijie Gao23f17162018-12-20 16:12:53 +080044 (((base) + (addr)) & 0x1f)
45
46#define GDMA_FWD_TO_CPU \
47 (0x20000000 | \
48 GDM_ICS_EN | \
49 GDM_TCS_EN | \
50 GDM_UCS_EN | \
51 STRP_CRC | \
52 (DP_PDMA << MYMAC_DP_S) | \
53 (DP_PDMA << BC_DP_S) | \
54 (DP_PDMA << MC_DP_S) | \
55 (DP_PDMA << UN_DP_S))
56
57#define GDMA_FWD_DISCARD \
58 (0x20000000 | \
59 GDM_ICS_EN | \
60 GDM_TCS_EN | \
61 GDM_UCS_EN | \
62 STRP_CRC | \
63 (DP_DISCARD << MYMAC_DP_S) | \
64 (DP_DISCARD << BC_DP_S) | \
65 (DP_DISCARD << MC_DP_S) | \
66 (DP_DISCARD << UN_DP_S))
67
/* PDMA RX descriptor word 0: DMA address of the first packet data buffer */
struct pdma_rxd_info1 {
	u32 PDP0;
};

/*
 * PDMA RX descriptor word 1: per-buffer lengths/flags and the descriptor
 * done bit set by hardware when reception into this descriptor completes.
 */
struct pdma_rxd_info2 {
	u32 PLEN1 : 14;		/* received length in buffer 1 */
	u32 LS1 : 1;		/* buffer 1 holds the last segment */
	u32 UN_USED : 1;
	u32 PLEN0 : 14;		/* received length in buffer 0 */
	u32 LS0 : 1;		/* buffer 0 holds the last segment */
	u32 DDONE : 1;		/* descriptor done flag */
};

/* PDMA RX descriptor word 2: DMA address of the second packet data buffer */
struct pdma_rxd_info3 {
	u32 PDP1;
};

/*
 * PDMA RX descriptor word 3: classification/offload metadata.
 * NOTE(review): field meanings inferred from names only — confirm
 * against the MediaTek frame-engine datasheet.
 */
struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;
	u32 CRSN : 5;
	u32 SP : 3;
	u32 L4F : 1;
	u32 L4VLD : 1;
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

/* Complete 16-byte PDMA RX descriptor, in hardware memory layout order */
struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};
104
/* PDMA TX descriptor word 0: DMA address of the first segment to send */
struct pdma_txd_info1 {
	u32 SDP0;
};

/*
 * PDMA TX descriptor word 1: segment lengths/flags; DDONE is set by
 * hardware once the descriptor has been transmitted.
 */
struct pdma_txd_info2 {
	u32 SDL1 : 14;		/* length of segment 1 */
	u32 LS1 : 1;		/* segment 1 is the last of the packet */
	u32 BURST : 1;
	u32 SDL0 : 14;		/* length of segment 0 */
	u32 LS0 : 1;		/* segment 0 is the last of the packet */
	u32 DDONE : 1;		/* descriptor done flag */
};

/* PDMA TX descriptor word 2: DMA address of the second segment */
struct pdma_txd_info3 {
	u32 SDP1;
};

/*
 * PDMA TX descriptor word 3: VLAN insertion, forward port and checksum
 * offload controls. NOTE(review): semantics inferred from names —
 * confirm against the MediaTek frame-engine datasheet.
 */
struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;
	u32 INS : 1;
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;
	u32 TSO : 1;
	u32 TUI_CO : 3;
};

/* Complete 16-byte PDMA TX descriptor, in hardware memory layout order */
struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};
138
/* Type of the external/embedded switch attached to the GMAC, if any */
enum mtk_switch {
	SW_NONE,
	SW_MT7530,
	SW_MT7531
};
144
/* Host SoC variants handled by this driver */
enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629,
	SOC_MT7622,
	SOC_MT7621
};
151
/* Per-device driver state */
struct mtk_eth_priv {
	/* One contiguous, cache-aligned pool holding all TX + RX buffers */
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	/* Uncached views of the TX/RX descriptor rings */
	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	/* Current ring positions */
	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;		/* frame engine register base */
	void __iomem *gmac_base;	/* GMAC register base */
	void __iomem *sgmii_base;	/* SGMII system register base */

	struct regmap *ethsys_regmap;	/* ETHSYS syscon regmap */

	/* MDIO bus and the access ops selected for the attached switch/PHY */
	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;		/* which GMAC of the SoC this device uses */
	int force_mode;		/* fixed-link: force speed/duplex */
	int speed;
	int duplex;

	struct phy_device *phydev;
	int phy_interface;	/* PHY_INTERFACE_MODE_* */
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt753x_smi_addr;	/* SMI address of the MT753x switch */
	u32 mt753x_phy_base;	/* base address of the switch internal PHYs */

	struct gpio_desc rst_gpio;	/* switch reset GPIO (non-MCM boards) */
	int mcm;			/* switch is in multi-chip module */

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};
195
196static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
197{
198 writel(val, priv->fe_base + PDMA_BASE + reg);
199}
200
201static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
202 u32 set)
203{
204 clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
205}
206
207static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
208 u32 val)
209{
210 u32 gdma_base;
211
212 if (no == 1)
213 gdma_base = GDMA2_BASE;
214 else
215 gdma_base = GDMA1_BASE;
216
217 writel(val, priv->fe_base + gdma_base + reg);
218}
219
/* Read a GMAC register */
static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

/* Write a GMAC register */
static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

/* Read-modify-write a GMAC register: clear @clr then set @set */
static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}
234
235static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
236 u32 set)
237{
Weijie Gao86062e72022-05-20 11:23:37 +0800238 uint val;
239
240 regmap_read(priv->ethsys_regmap, reg, &val);
241 val &= ~clr;
242 val |= set;
243 regmap_write(priv->ethsys_regmap, reg, val);
Weijie Gao23f17162018-12-20 16:12:53 +0800244}
245
246/* Direct MDIO clause 22/45 access via SoC */
247static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
248 u32 cmd, u32 st)
249{
250 int ret;
251 u32 val;
252
253 val = (st << MDIO_ST_S) |
254 ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
255 (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
256 (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);
257
258 if (cmd == MDIO_CMD_WRITE)
259 val |= data & MDIO_RW_DATA_M;
260
261 mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);
262
263 ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
264 PHY_ACS_ST, 0, 5000, 0);
265 if (ret) {
266 pr_warn("MDIO access timeout\n");
267 return ret;
268 }
269
270 if (cmd == MDIO_CMD_READ) {
271 val = mtk_gmac_read(priv, GMAC_PIAC_REG);
272 return val & MDIO_RW_DATA_M;
273 }
274
275 return 0;
276}
277
/* Direct MDIO clause 22 read via SoC; returns register value or <0 error */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC; returns 0 or <0 error */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}
289
/*
 * Direct MDIO clause 45 read via SoC.
 * C45 is a two-phase protocol: first latch the register address with an
 * ADDR cycle, then issue the READ cycle — the order must not change.
 */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/*
 * Direct MDIO clause 45 write via SoC.
 * Same two-phase sequence as the read: ADDR cycle first, then WRITE.
 */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}
316
/*
 * Indirect MDIO clause 45 read via the clause 22 MMD access registers
 * (MII_MMD_ACC_CTL_REG / MII_MMD_ADDR_DATA_REG).
 * The four-step sequence (set devad, write reg address, switch to data
 * mode, read data) is mandated by the MMD protocol — order matters.
 */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	/* Step 1: select the MMD device, address mode */
	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	/* Step 2: latch the target register address */
	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	/* Step 3: switch the same device to data mode */
	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	/* Step 4: read the register contents */
	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/*
 * Indirect MDIO clause 45 write via the clause 22 MMD access registers.
 * Same mandatory sequence as the read, with a data write as the final step.
 */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}
366
Landen Chao532de8d2020-02-18 16:49:37 +0800367/*
368 * MT7530 Internal Register Address Bits
369 * -------------------------------------------------------------------
370 * | 15 14 13 12 11 10 9 8 7 6 | 5 4 3 2 | 1 0 |
371 * |----------------------------------------|---------------|--------|
372 * | Page Address | Reg Address | Unused |
373 * -------------------------------------------------------------------
374 */
375
/*
 * Read a 32-bit MT753x switch register over SMI.
 * Per the paged access scheme documented above: select the page
 * (reg >> 6), read the low 16 bits from the in-page register slot,
 * then the high 16 bits from register 0x10. The three SMI cycles
 * must happen in this order.
 *
 * @data may be NULL if the caller only wants to trigger the access.
 * Returns 0 on success or a negative error from the SMI layer.
 */
static int mt753x_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt753x_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}
400
/*
 * Write a 32-bit MT753x switch register over SMI.
 * Mirrors mt753x_reg_read(): select page, write low word into the
 * in-page slot, then write the high word to register 0x10 (which
 * commits the access). Order is significant.
 */
static int mt753x_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt753x_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt753x_smi_addr, 0x10, data >> 16);
}
419
420static void mt753x_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
421 u32 set)
422{
423 u32 val;
424
425 mt753x_reg_read(priv, reg, &val);
426 val &= ~clr;
427 val |= set;
428 mt753x_reg_write(priv, reg, val);
429}
430
/*
 * Indirect MDIO clause 22/45 access through the MT7531's own PHY
 * indirect access controller (MT7531_PHY_IAC), itself reached via
 * paged SMI register access.
 *
 * Returns read data for read commands, 0 for writes, or -ETIMEDOUT
 * if the controller stays busy for more than 100ms.
 */
static int mt7531_mii_rw(struct mtk_eth_priv *priv, int phy, int reg, u16 data,
			 u32 cmd, u32 st)
{
	ulong timeout;
	u32 val, timeout_ms;
	int ret = 0;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      ((phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      ((reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	/* C45 ADDR cycles also carry payload, unlike the SoC PIAC path */
	if (cmd == MDIO_CMD_WRITE || cmd == MDIO_CMD_ADDR)
		val |= data & MDIO_RW_DATA_M;

	mt753x_reg_write(priv, MT7531_PHY_IAC, val | PHY_ACS_ST);

	/* Poll until the busy bit clears or the timeout elapses */
	timeout_ms = 100;
	timeout = get_timer(0);
	while (1) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);

		if ((val & PHY_ACS_ST) == 0)
			break;

		if (get_timer(timeout) > timeout_ms)
			return -ETIMEDOUT;
	}

	if (cmd == MDIO_CMD_READ || cmd == MDIO_CMD_READ_C45) {
		mt753x_reg_read(priv, MT7531_PHY_IAC, &val);
		ret = val & MDIO_RW_DATA_M;
	}

	return ret;
}
468
469static int mt7531_mii_ind_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
470{
471 u8 phy_addr;
472
473 if (phy >= MT753X_NUM_PHYS)
474 return -EINVAL;
475
476 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
477
478 return mt7531_mii_rw(priv, phy_addr, reg, 0, MDIO_CMD_READ,
479 MDIO_ST_C22);
480}
481
482static int mt7531_mii_ind_write(struct mtk_eth_priv *priv, u8 phy, u8 reg,
483 u16 val)
484{
485 u8 phy_addr;
486
487 if (phy >= MT753X_NUM_PHYS)
488 return -EINVAL;
489
490 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, phy);
491
492 return mt7531_mii_rw(priv, phy_addr, reg, val, MDIO_CMD_WRITE,
493 MDIO_ST_C22);
494}
495
496int mt7531_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
497{
498 u8 phy_addr;
499 int ret;
500
501 if (addr >= MT753X_NUM_PHYS)
502 return -EINVAL;
503
504 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
505
506 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
507 MDIO_ST_C45);
508 if (ret)
509 return ret;
510
511 return mt7531_mii_rw(priv, phy_addr, devad, 0, MDIO_CMD_READ_C45,
512 MDIO_ST_C45);
513}
514
515static int mt7531_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
516 u16 reg, u16 val)
517{
518 u8 phy_addr;
519 int ret;
520
521 if (addr >= MT753X_NUM_PHYS)
522 return 0;
523
524 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, addr);
525
526 ret = mt7531_mii_rw(priv, phy_addr, devad, reg, MDIO_CMD_ADDR,
527 MDIO_ST_C45);
528 if (ret)
529 return ret;
530
531 return mt7531_mii_rw(priv, phy_addr, devad, val, MDIO_CMD_WRITE,
532 MDIO_ST_C45);
533}
534
Weijie Gao23f17162018-12-20 16:12:53 +0800535static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
536{
537 struct mtk_eth_priv *priv = bus->priv;
538
539 if (devad < 0)
540 return priv->mii_read(priv, addr, reg);
541 else
542 return priv->mmd_read(priv, addr, devad, reg);
543}
544
545static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
546 u16 val)
547{
548 struct mtk_eth_priv *priv = bus->priv;
549
550 if (devad < 0)
551 return priv->mii_write(priv, addr, reg, val);
552 else
553 return priv->mmd_write(priv, addr, devad, reg, val);
554}
555
556static int mtk_mdio_register(struct udevice *dev)
557{
558 struct mtk_eth_priv *priv = dev_get_priv(dev);
559 struct mii_dev *mdio_bus = mdio_alloc();
560 int ret;
561
562 if (!mdio_bus)
563 return -ENOMEM;
564
565 /* Assign MDIO access APIs according to the switch/phy */
566 switch (priv->sw) {
567 case SW_MT7530:
568 priv->mii_read = mtk_mii_read;
569 priv->mii_write = mtk_mii_write;
570 priv->mmd_read = mtk_mmd_ind_read;
571 priv->mmd_write = mtk_mmd_ind_write;
572 break;
Landen Chao532de8d2020-02-18 16:49:37 +0800573 case SW_MT7531:
574 priv->mii_read = mt7531_mii_ind_read;
575 priv->mii_write = mt7531_mii_ind_write;
576 priv->mmd_read = mt7531_mmd_ind_read;
577 priv->mmd_write = mt7531_mmd_ind_write;
578 break;
Weijie Gao23f17162018-12-20 16:12:53 +0800579 default:
580 priv->mii_read = mtk_mii_read;
581 priv->mii_write = mtk_mii_write;
582 priv->mmd_read = mtk_mmd_read;
583 priv->mmd_write = mtk_mmd_write;
584 }
585
586 mdio_bus->read = mtk_mdio_read;
587 mdio_bus->write = mtk_mdio_write;
588 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
589
590 mdio_bus->priv = (void *)priv;
591
592 ret = mdio_register(mdio_bus);
593
594 if (ret)
595 return ret;
596
597 priv->mdio_bus = mdio_bus;
598
599 return 0;
600}
601
Landen Chao532de8d2020-02-18 16:49:37 +0800602static int mt753x_core_reg_read(struct mtk_eth_priv *priv, u32 reg)
Weijie Gao23f17162018-12-20 16:12:53 +0800603{
Landen Chao532de8d2020-02-18 16:49:37 +0800604 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
Weijie Gao23f17162018-12-20 16:12:53 +0800605
Landen Chao532de8d2020-02-18 16:49:37 +0800606 return priv->mmd_read(priv, phy_addr, 0x1f, reg);
Weijie Gao23f17162018-12-20 16:12:53 +0800607}
608
Landen Chao532de8d2020-02-18 16:49:37 +0800609static void mt753x_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
Weijie Gao23f17162018-12-20 16:12:53 +0800610{
Landen Chao532de8d2020-02-18 16:49:37 +0800611 u8 phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, 0);
Weijie Gao23f17162018-12-20 16:12:53 +0800612
Landen Chao532de8d2020-02-18 16:49:37 +0800613 priv->mmd_write(priv, phy_addr, 0x1f, reg, val);
Weijie Gao23f17162018-12-20 16:12:53 +0800614}
615
/*
 * Configure the MT7530 core/TRGMII PLLs for the given xMII @mode.
 * Only RGMII is supported. The disable → program → enable ordering of
 * the PLL writes is required by the hardware; do not reorder.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode.
 */
static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt753x_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	/* Let the PLL settle before re-enabling the clock */
	udelay(20);

	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt753x_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt753x_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt753x_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt753x_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}
675
/*
 * One-time MT7530 switch bring-up: discover the internal PHY base from
 * the hardware trap register, reset the switch, force port 6 (CPU-facing
 * port) to 1000M full duplex RGMII, set up the core PLLs and TRGMII
 * drive strength, and power-cycle the internal PHYs around the reset.
 *
 * Returns 0 (always; register accesses are not individually checked).
 */
static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val, txdrv;
	int i;

	if (priv->soc != SOC_MT7621) {
		/* Select 250MHz clk for RGMII mode */
		mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
			       ETHSYS_TRGMII_CLK_SEL362_5, 0);

		txdrv = 8;
	} else {
		/* MT7621 uses a lower TRGMII TX drive strength */
		txdrv = 4;
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address from the trapped SMI address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt753x_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt753x_reg_write(priv, PMCR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt753x_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt753x_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt753x_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt753x_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (txdrv << TD_DM_DRVP_S) |
				 (txdrv << TD_DM_DRVN_S));

	/* Set TRGMII RX delay taps */
	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt753x_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	return 0;
}
764
/*
 * Program the MT7531 core PLL to 500MHz following the documented
 * disable → program → enable step sequence. Supports a 25MHz XTAL only.
 * NOTE(review): the @mcm parameter is currently unused here — confirm
 * whether MCM variants need a different sequence.
 */
static void mt7531_core_pll_setup(struct mtk_eth_priv *priv, int mcm)
{
	/* Step 1 : Disable MT7531 COREPLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, 0);

	/* Step 2: switch to XTAL output */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_CLKSW, SW_CLKSW);

	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, 0);

	/* Step 3: disable PLLGP and enable program PLLGP */
	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, SW_PLLGP, SW_PLLGP);

	/* Step 4: program COREPLL output frequency to 500MHz */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_POSDIV_M,
		       2 << RG_COREPLL_POSDIV_S);
	udelay(25);

	/* Currently, support XTAL 25Mhz only */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_M,
		       0x140000 << RG_COREPLL_SDM_PCW_S);

	/* Set feedback divide ratio update signal to high */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG,
		       RG_COREPLL_SDM_PCW_CHG);

	/* Wait for at least 16 XTAL clocks */
	udelay(10);

	/* Step 5: set feedback divide ratio update signal to low */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_SDM_PCW_CHG, 0);

	/* add enable 325M clock for SGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);

	/* add enable 250SSC clock for RGMII */
	mt753x_reg_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);

	/* Step 6: Enable MT7531 PLL */
	mt753x_reg_rmw(priv, MT7531_PLLGP_CR0, RG_COREPLL_EN, RG_COREPLL_EN);

	mt753x_reg_rmw(priv, MT7531_PLLGP_EN, EN_COREPLL, EN_COREPLL);

	udelay(25);
}
810
/*
 * Configure MT7531 port 5 or 6 for forced-mode 2.5G SGMII.
 * Returns 0 on success, -EINVAL for any other port.
 */
static int mt7531_port_sgmii_init(struct mtk_eth_priv *priv,
				  u32 port)
{
	if (port != 5 && port != 6) {
		printf("mt7531: port %d is not a SGMII port\n", port);
		return -EINVAL;
	}

	/* Set SGMII GEN2 speed(2.5G) */
	mt753x_reg_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
		       SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	mt753x_reg_rmw(priv, MT7531_PCS_CONTROL_1(port),
		       SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	mt753x_reg_write(priv, MT7531_SGMII_MODE(port), SGMII_FORCE_MODE);

	/* Release PHYA power down state */
	mt753x_reg_rmw(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
		       SGMII_PHYA_PWD, 0);

	return 0;
}
836
837static int mt7531_port_rgmii_init(struct mtk_eth_priv *priv, u32 port)
838{
839 u32 val;
840
841 if (port != 5) {
842 printf("error: RGMII mode is not available for port %d\n",
843 port);
844 return -EINVAL;
845 }
846
847 mt753x_reg_read(priv, MT7531_CLKGEN_CTRL, &val);
848 val |= GP_CLK_EN;
849 val &= ~GP_MODE_M;
850 val |= GP_MODE_RGMII << GP_MODE_S;
851 val |= TXCLK_NO_REVERSE;
852 val |= RXCLK_NO_DELAY;
853 val &= ~CLK_SKEW_IN_M;
854 val |= CLK_SKEW_IN_NO_CHANGE << CLK_SKEW_IN_S;
855 val &= ~CLK_SKEW_OUT_M;
856 val |= CLK_SKEW_OUT_NO_CHANGE << CLK_SKEW_OUT_S;
857 mt753x_reg_write(priv, MT7531_CLKGEN_CTRL, val);
858
859 return 0;
860}
861
/*
 * Tune the MT7531 internal PHYs: enable HW auto-downshift and link-down
 * power saving, and restrict the dev-0x1e power-saving mode to TX.
 * The PHY index i is translated to a bus address by the indirect
 * mii/mmd ops installed for SW_MT7531.
 */
static void mt7531_phy_setting(struct mtk_eth_priv *priv)
{
	int i;
	u32 val;

	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		/* Enable HW auto downshift (via extended page 0x1 reg 14) */
		priv->mii_write(priv, i, 0x1f, 0x1);
		val = priv->mii_read(priv, i, PHY_EXT_REG_14);
		val |= PHY_EN_DOWN_SHFIT;
		priv->mii_write(priv, i, PHY_EXT_REG_14, val);

		/* PHY link down power saving enable */
		val = priv->mii_read(priv, i, PHY_EXT_REG_17);
		val |= PHY_LINKDOWN_POWER_SAVING_EN;
		priv->mii_write(priv, i, PHY_EXT_REG_17, val);

		/* Limit power saving to the TX side */
		val = priv->mmd_read(priv, i, 0x1e, PHY_DEV1E_REG_0C6);
		val &= ~PHY_POWER_SAVING_M;
		val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
		priv->mmd_write(priv, i, 0x1e, PHY_DEV1E_REG_0C6, val);
	}
}
885
/*
 * One-time MT7531 switch bring-up: derive the internal PHY base from the
 * SMI address, reset the switch, set up the core PLL, configure ports
 * 5/6 for the board's RGMII/SGMII wiring, force both CPU-facing ports to
 * 1000M full duplex with flow control, and tune/power-up the PHYs.
 *
 * Returns 0 (always; register accesses are not individually checked).
 */
static int mt7531_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	u32 pmcr;
	u32 port5_sgmii;
	int i;

	/* Internal PHYs live directly after the switch's SMI address */
	priv->mt753x_phy_base = (priv->mt753x_smi_addr + 1) &
				MT753X_SMI_ADDR_MASK;

	/* Turn off PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt753x_reg_write(priv, PMCR_REG(5), FORCE_MODE_LNK);
	mt753x_reg_write(priv, PMCR_REG(6), FORCE_MODE_LNK);

	/* Switch soft reset */
	mt753x_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	/* Enable MDC input Schmitt Trigger */
	mt753x_reg_rmw(priv, MT7531_SMT0_IOLB, SMT_IOLB_5_SMI_MDC_EN,
		       SMT_IOLB_5_SMI_MDC_EN);

	mt7531_core_pll_setup(priv, priv->mcm);

	/* Pad strapping decides whether port 5 is SGMII-capable */
	mt753x_reg_read(priv, MT7531_TOP_SIG_SR, &val);
	port5_sgmii = !!(val & PAD_DUAL_SGMII_EN);

	/* port5 support either RGMII or SGMII, port6 only support SGMII. */
	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!port5_sgmii)
			mt7531_port_rgmii_init(priv, 5);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mt7531_port_sgmii_init(priv, 6);
		if (port5_sgmii)
			mt7531_port_sgmii_init(priv, 5);
		break;
	default:
		break;
	}

	/* Forced 1000M/FD with TX/RX flow control for both CPU ports */
	pmcr = MT7531_FORCE_MODE |
	       (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
	       MAC_MODE | MAC_TX_EN | MAC_RX_EN |
	       BKOFF_EN | BACKPR_EN |
	       FORCE_RX_FC | FORCE_TX_FC |
	       (SPEED_1000M << FORCE_SPD_S) | FORCE_DPX |
	       FORCE_LINK;

	mt753x_reg_write(priv, PMCR_REG(5), pmcr);
	mt753x_reg_write(priv, PMCR_REG(6), pmcr);

	/* Turn on PHYs */
	for (i = 0; i < MT753X_NUM_PHYS; i++) {
		phy_addr = MT753X_PHY_ADDR(priv->mt753x_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	mt7531_phy_setting(priv);

	/* Enable Internal PHYs */
	val = mt753x_core_reg_read(priv, CORE_PLL_GROUP4);
	val |= MT7531_BYPASS_MODE;
	val &= ~MT7531_POWER_ON_OFF;
	mt753x_core_reg_write(priv, CORE_PLL_GROUP4, val);

	return 0;
}
966
/*
 * Common MT753x initialization: hardware-reset the switch (via the MCM
 * reset controller or the board's reset GPIO), run the model-specific
 * init hook, then isolate all user ports so traffic only flows between
 * each user port and port 6 (the CPU-facing port).
 *
 * Returns 0 on success or the error from the model-specific init.
 */
int mt753x_switch_init(struct mtk_eth_priv *priv)
{
	int ret;
	int i;

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	/* Model-specific setup (mt7530_setup or mt7531_setup) */
	ret = priv->switch_init(priv);
	if (ret)
		return ret;

	/* Set port isolation */
	for (i = 0; i < MT753X_NUM_PORTS; i++) {
		/* Set port matrix mode */
		if (i != 6)
			/* user port: may forward only to port 6 (bit 6) */
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			/* port 6: may forward to ports 0-5 */
			mt753x_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt753x_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}
1007
1008static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
1009{
1010 u16 lcl_adv = 0, rmt_adv = 0;
1011 u8 flowctrl;
1012 u32 mcr;
1013
Landen Chao532de8d2020-02-18 16:49:37 +08001014 mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
Weijie Gao23f17162018-12-20 16:12:53 +08001015 (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
1016 MAC_MODE | FORCE_MODE |
1017 MAC_TX_EN | MAC_RX_EN |
1018 BKOFF_EN | BACKPR_EN;
1019
1020 switch (priv->phydev->speed) {
1021 case SPEED_10:
1022 mcr |= (SPEED_10M << FORCE_SPD_S);
1023 break;
1024 case SPEED_100:
1025 mcr |= (SPEED_100M << FORCE_SPD_S);
1026 break;
1027 case SPEED_1000:
1028 mcr |= (SPEED_1000M << FORCE_SPD_S);
1029 break;
1030 };
1031
1032 if (priv->phydev->link)
1033 mcr |= FORCE_LINK;
1034
1035 if (priv->phydev->duplex) {
1036 mcr |= FORCE_DPX;
1037
1038 if (priv->phydev->pause)
1039 rmt_adv = LPA_PAUSE_CAP;
1040 if (priv->phydev->asym_pause)
1041 rmt_adv |= LPA_PAUSE_ASYM;
1042
1043 if (priv->phydev->advertising & ADVERTISED_Pause)
1044 lcl_adv |= ADVERTISE_PAUSE_CAP;
1045 if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
1046 lcl_adv |= ADVERTISE_PAUSE_ASYM;
1047
1048 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1049
1050 if (flowctrl & FLOW_CTRL_TX)
1051 mcr |= FORCE_TX_FC;
1052 if (flowctrl & FLOW_CTRL_RX)
1053 mcr |= FORCE_RX_FC;
1054
1055 debug("rx pause %s, tx pause %s\n",
1056 flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
1057 flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
1058 }
1059
1060 mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
1061}
1062
1063static int mtk_phy_start(struct mtk_eth_priv *priv)
1064{
1065 struct phy_device *phydev = priv->phydev;
1066 int ret;
1067
1068 ret = phy_startup(phydev);
1069
1070 if (ret) {
1071 debug("Could not initialize PHY %s\n", phydev->dev->name);
1072 return ret;
1073 }
1074
1075 if (!phydev->link) {
1076 debug("%s: link down.\n", phydev->dev->name);
1077 return 0;
1078 }
1079
1080 mtk_phy_link_adjust(priv);
1081
1082 debug("Speed: %d, %s duplex%s\n", phydev->speed,
1083 (phydev->duplex) ? "full" : "half",
1084 (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");
1085
1086 return 0;
1087}
1088
1089static int mtk_phy_probe(struct udevice *dev)
1090{
1091 struct mtk_eth_priv *priv = dev_get_priv(dev);
1092 struct phy_device *phydev;
1093
1094 phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
1095 priv->phy_interface);
1096 if (!phydev)
1097 return -ENODEV;
1098
1099 phydev->supported &= PHY_GBIT_FEATURES;
1100 phydev->advertising = phydev->supported;
1101
1102 priv->phydev = phydev;
1103 phy_config(phydev);
1104
1105 return 0;
1106}
1107
/*
 * Bring up the SGMII PCS in fixed 2.5Gbps force mode.
 *
 * The write order matters: speed, autoneg and force-mode settings are
 * programmed before the PHYA is released from power-down.
 */
static void mtk_sgmii_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed(2.5G); the register offset differs per SoC */
	clrsetbits_le32(priv->sgmii_base + ((priv->soc == SOC_MT7622) ?
			SGMSYS_GEN2_SPEED : SGMSYS_GEN2_SPEED_V2),
			SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}
1126
/*
 * Configure the GMAC for the selected PHY interface mode.
 *
 * Selects the GE mode mux in ETHSYS, optionally brings up the SGMII
 * PCS, programs a forced-link MCR when fixed-link is used, and applies
 * MT7623-specific TRGMII driving/clock adjustments.
 */
static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		/* SGMII reuses the RGMII GE mode; traffic is steered to
		 * the SGMII PCS via the SYSCFG0 mux below.
		 */
		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		mtk_sgmii_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		/* Fixed-link: force link/speed/duplex instead of autoneg */
		mcr = (IPG_96BIT_WITH_SHORT_IPG << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (priv->soc == SOC_MT7623) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		/* Pulse RX_RST while selecting delayed RXC sampling */
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}
1197
/*
 * Initialize the PDMA TX/RX descriptor rings and the packet buffer
 * pool. Each of the NUM_TX_DESC/NUM_RX_DESC descriptors is given a
 * dedicated PKTSIZE_ALIGN-sized buffer carved from priv->pkt_pool.
 */
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	/* Clear the upper PDMA config bits before reprogramming rings */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	/* Make the zeroed packet buffers visible to the DMA engine */
	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		/* DDONE=1 marks a TX descriptor as free for the CPU */
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	/* Program ring base/size and initial indices for queue 0 */
	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}
1243
/*
 * Bring up the interface: reset the frame engine, set up GDMA
 * forwarding, initialize the DMA rings, start the PHY (when no switch
 * is attached) and finally enable TX/RX DMA.
 *
 * Return: 0 on success, negative error code from PHY startup.
 */
static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	/* Discard ingress traffic on the unused GMAC */
	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	/* Enable TX/RX DMA and TX write-back of the DDONE bit */
	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}
1280
/*
 * Disable TX/RX DMA and wait up to 5ms for the engines to go idle.
 * The result of the busy-wait is intentionally ignored (best effort).
 */
static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}
1292
1293static int mtk_eth_write_hwaddr(struct udevice *dev)
1294{
Simon Glassc69cda22020-12-03 16:55:20 -07001295 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001296 struct mtk_eth_priv *priv = dev_get_priv(dev);
1297 unsigned char *mac = pdata->enetaddr;
1298 u32 macaddr_lsb, macaddr_msb;
1299
1300 macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
1301 macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
1302 ((u32)mac[4] << 8) | (u32)mac[5];
1303
1304 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
1305 mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);
1306
1307 return 0;
1308}
1309
1310static int mtk_eth_send(struct udevice *dev, void *packet, int length)
1311{
1312 struct mtk_eth_priv *priv = dev_get_priv(dev);
1313 u32 idx = priv->tx_cpu_owner_idx0;
1314 void *pkt_base;
1315
1316 if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
1317 debug("mtk-eth: TX DMA descriptor ring is full\n");
1318 return -EPERM;
1319 }
1320
1321 pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
1322 memcpy(pkt_base, packet, length);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001323 flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
Weijie Gao23f17162018-12-20 16:12:53 +08001324 roundup(length, ARCH_DMA_MINALIGN));
1325
1326 priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
1327 priv->tx_ring_noc[idx].txd_info2.DDONE = 0;
1328
1329 priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
1330 mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);
1331
1332 return 0;
1333}
1334
1335static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
1336{
1337 struct mtk_eth_priv *priv = dev_get_priv(dev);
1338 u32 idx = priv->rx_dma_owner_idx0;
1339 uchar *pkt_base;
1340 u32 length;
1341
1342 if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
1343 debug("mtk-eth: RX DMA descriptor ring is empty\n");
1344 return -EAGAIN;
1345 }
1346
1347 length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
1348 pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001349 invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
Weijie Gao23f17162018-12-20 16:12:53 +08001350 roundup(length, ARCH_DMA_MINALIGN));
1351
1352 if (packetp)
1353 *packetp = pkt_base;
1354
1355 return length;
1356}
1357
1358static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
1359{
1360 struct mtk_eth_priv *priv = dev_get_priv(dev);
1361 u32 idx = priv->rx_dma_owner_idx0;
1362
1363 priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
1364 priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
1365 priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
1366
1367 mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
1368 priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;
1369
1370 return 0;
1371}
1372
1373static int mtk_eth_probe(struct udevice *dev)
1374{
Simon Glassc69cda22020-12-03 16:55:20 -07001375 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001376 struct mtk_eth_priv *priv = dev_get_priv(dev);
Frank Wunderlich47b14312020-01-31 10:23:29 +01001377 ulong iobase = pdata->iobase;
Weijie Gao23f17162018-12-20 16:12:53 +08001378 int ret;
1379
1380 /* Frame Engine Register Base */
1381 priv->fe_base = (void *)iobase;
1382
1383 /* GMAC Register Base */
1384 priv->gmac_base = (void *)(iobase + GMAC_BASE);
1385
1386 /* MDIO register */
1387 ret = mtk_mdio_register(dev);
1388 if (ret)
1389 return ret;
1390
1391 /* Prepare for tx/rx rings */
1392 priv->tx_ring_noc = (struct pdma_txdesc *)
1393 noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
1394 ARCH_DMA_MINALIGN);
1395 priv->rx_ring_noc = (struct pdma_rxdesc *)
1396 noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
1397 ARCH_DMA_MINALIGN);
1398
1399 /* Set MAC mode */
1400 mtk_mac_init(priv);
1401
1402 /* Probe phy if switch is not specified */
1403 if (priv->sw == SW_NONE)
1404 return mtk_phy_probe(dev);
1405
1406 /* Initialize switch */
Landen Chao532de8d2020-02-18 16:49:37 +08001407 return mt753x_switch_init(priv);
Weijie Gao23f17162018-12-20 16:12:53 +08001408}
1409
/*
 * Driver removal: unregister and free the MDIO bus, then make sure
 * any running DMA is stopped before the device goes away.
 *
 * Return: always 0.
 */
static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}
1423
Simon Glassd1998a92020-12-03 16:55:21 -07001424static int mtk_eth_of_to_plat(struct udevice *dev)
Weijie Gao23f17162018-12-20 16:12:53 +08001425{
Simon Glassc69cda22020-12-03 16:55:20 -07001426 struct eth_pdata *pdata = dev_get_plat(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001427 struct mtk_eth_priv *priv = dev_get_priv(dev);
1428 struct ofnode_phandle_args args;
1429 struct regmap *regmap;
1430 const char *str;
1431 ofnode subnode;
1432 int ret;
1433
1434 priv->soc = dev_get_driver_data(dev);
1435
Weijie Gao528e4832022-05-20 11:23:31 +08001436 pdata->iobase = (phys_addr_t)dev_remap_addr(dev);
Weijie Gao23f17162018-12-20 16:12:53 +08001437
1438 /* get corresponding ethsys phandle */
1439 ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
1440 &args);
1441 if (ret)
1442 return ret;
1443
Weijie Gao86062e72022-05-20 11:23:37 +08001444 priv->ethsys_regmap = syscon_node_to_regmap(args.node);
1445 if (IS_ERR(priv->ethsys_regmap))
1446 return PTR_ERR(priv->ethsys_regmap);
Weijie Gao23f17162018-12-20 16:12:53 +08001447
1448 /* Reset controllers */
1449 ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
1450 if (ret) {
1451 printf("error: Unable to get reset ctrl for frame engine\n");
1452 return ret;
1453 }
1454
1455 priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);
1456
1457 /* Interface mode is required */
Marek BehĂșn123ca112022-04-07 00:33:01 +02001458 pdata->phy_interface = dev_read_phy_mode(dev);
1459 priv->phy_interface = pdata->phy_interface;
Marek BehĂșnffb0f6f2022-04-07 00:33:03 +02001460 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA) {
Weijie Gao23f17162018-12-20 16:12:53 +08001461 printf("error: phy-mode is not set\n");
1462 return -EINVAL;
1463 }
1464
1465 /* Force mode or autoneg */
1466 subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
1467 if (ofnode_valid(subnode)) {
1468 priv->force_mode = 1;
1469 priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
1470 priv->duplex = ofnode_read_bool(subnode, "full-duplex");
1471
1472 if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
1473 priv->speed != SPEED_1000) {
1474 printf("error: no valid speed set in fixed-link\n");
1475 return -EINVAL;
1476 }
1477 }
1478
MarkLeeb4ef49a2020-01-21 19:31:57 +08001479 if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1480 /* get corresponding sgmii phandle */
1481 ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
1482 NULL, 0, 0, &args);
1483 if (ret)
1484 return ret;
1485
1486 regmap = syscon_node_to_regmap(args.node);
1487
1488 if (IS_ERR(regmap))
1489 return PTR_ERR(regmap);
1490
1491 priv->sgmii_base = regmap_get_range(regmap, 0);
1492
1493 if (!priv->sgmii_base) {
1494 dev_err(dev, "Unable to find sgmii\n");
1495 return -ENODEV;
1496 }
1497 }
1498
Weijie Gao23f17162018-12-20 16:12:53 +08001499 /* check for switch first, otherwise phy will be used */
1500 priv->sw = SW_NONE;
1501 priv->switch_init = NULL;
1502 str = dev_read_string(dev, "mediatek,switch");
1503
1504 if (str) {
1505 if (!strcmp(str, "mt7530")) {
1506 priv->sw = SW_MT7530;
1507 priv->switch_init = mt7530_setup;
Landen Chao532de8d2020-02-18 16:49:37 +08001508 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
1509 } else if (!strcmp(str, "mt7531")) {
1510 priv->sw = SW_MT7531;
1511 priv->switch_init = mt7531_setup;
1512 priv->mt753x_smi_addr = MT753X_DFL_SMI_ADDR;
Weijie Gao23f17162018-12-20 16:12:53 +08001513 } else {
1514 printf("error: unsupported switch\n");
1515 return -EINVAL;
1516 }
1517
1518 priv->mcm = dev_read_bool(dev, "mediatek,mcm");
1519 if (priv->mcm) {
1520 ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
1521 if (ret) {
1522 printf("error: no reset ctrl for mcm\n");
1523 return ret;
1524 }
1525 } else {
1526 gpio_request_by_name(dev, "reset-gpios", 0,
1527 &priv->rst_gpio, GPIOD_IS_OUT);
1528 }
1529 } else {
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001530 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
1531 0, &args);
1532 if (ret) {
Weijie Gao23f17162018-12-20 16:12:53 +08001533 printf("error: phy-handle is not specified\n");
1534 return ret;
1535 }
1536
Weijie Gaoebb97ea2019-04-28 15:08:57 +08001537 priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
Weijie Gao23f17162018-12-20 16:12:53 +08001538 if (priv->phy_addr < 0) {
1539 printf("error: phy address is not specified\n");
1540 return ret;
1541 }
1542 }
1543
1544 return 0;
1545}
1546
/* Compatible strings; .data selects the SoC variant for this driver */
static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{ .compatible = "mediatek,mt7622-eth", .data = SOC_MT7622 },
	{ .compatible = "mediatek,mt7621-eth", .data = SOC_MT7621 },
	{}
};
1554
/* U-Boot ethernet uclass operations implemented by this driver */
static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};
1563
/*
 * Driver declaration. DM_FLAG_ALLOC_PRIV_DMA requests DMA-capable
 * (aligned) memory for the private data, which embeds the packet pool.
 */
U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.of_to_plat = mtk_eth_of_to_plat,
	.plat_auto = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};