// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 * Author: Mark Lee <mark-mc.lee@mediatek.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <malloc.h>
#include <miiphy.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/mdio.h>
#include <linux/mii.h>

#include "mtk_eth.h"

#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

#define MT7530_NUM_PHYS		5
#define MT7530_DFL_SMI_ADDR	31

#define MT7530_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

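/*
 * GDMA ingress control values: per the bit names in mtk_eth.h, both settings
 * strip the CRC and enable IP/TCP/UDP checksum handling (GDM_ICS/TCS/UCS);
 * they differ only in the destination port programmed for my-MAC, broadcast,
 * multicast and unknown-unicast frames -- either the PDMA (CPU path) or the
 * discard port.
 */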
#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))

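/*
 * PDMA TX/RX descriptors as seen by the hardware: four 32-bit words per
 * descriptor. DDONE is the ownership flag (set on a TX descriptor once the
 * DMA has sent it, set on an RX descriptor once a packet has been received),
 * PLEN0/SDL0 carry the buffer length and FPORT selects the GDMA forward port.
 */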
struct pdma_rxd_info1 {
	u32 PDP0;
};

struct pdma_rxd_info2 {
	u32 PLEN1 : 14;
	u32 LS1 : 1;
	u32 UN_USED : 1;
	u32 PLEN0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_rxd_info3 {
	u32 PDP1;
};

struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;
	u32 CRSN : 5;
	u32 SP : 3;
	u32 L4F : 1;
	u32 L4VLD : 1;
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};

struct pdma_txd_info1 {
	u32 SDP0;
};

struct pdma_txd_info2 {
	u32 SDL1 : 14;
	u32 LS1 : 1;
	u32 BURST : 1;
	u32 SDL0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_txd_info3 {
	u32 SDP1;
};

struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;
	u32 INS : 1;
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;
	u32 TSO : 1;
	u32 TUI_CO : 3;
};

struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};

enum mtk_switch {
	SW_NONE,
	SW_MT7530
};

enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629
};

struct mtk_eth_priv {
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *ethsys_base;

	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;
	int force_mode;
	int speed;
	int duplex;

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt7530_smi_addr;
	u32 mt7530_phy_base;

	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};

static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + PDMA_BASE + reg);
}

static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
}

static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
			   u32 val)
{
	u32 gdma_base;

	if (no == 1)
		gdma_base = GDMA2_BASE;
	else
		gdma_base = GDMA1_BASE;

	writel(val, priv->fe_base + gdma_base + reg);
}

static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}

static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	clrsetbits_le32(priv->ethsys_base + reg, clr, set);
}

/* Direct MDIO clause 22/45 access via SoC */
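/*
 * The PHY Indirect Access Control (PIAC) register is loaded with the start
 * code, opcode, PHY and register address (plus the data word for a write),
 * then PHY_ACS_ST is set to kick the transaction and polled until the
 * hardware clears it; for reads the result is fetched back from the same
 * register afterwards.
 */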
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE)
		val |= data & MDIO_RW_DATA_M;

	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}

/* Direct MDIO clause 22 read via SoC */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}

/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}

/* Indirect MDIO clause 45 read via MII registers */
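/*
 * Clause 45 registers of a PHY that only speaks clause 22 (e.g. the MT7530
 * internal PHYs) are reached through the standard MMD indirect mechanism:
 * select the MMD device with an address command in MII_MMD_ACC_CTL_REG,
 * latch the register number via MII_MMD_ADDR_DATA_REG, switch the control
 * register to data mode, then read or write the data through
 * MII_MMD_ADDR_DATA_REG.
 */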
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/* Indirect MDIO clause 45 write via MII registers */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}

static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_read(priv, addr, reg);
	else
		return priv->mmd_read(priv, addr, devad, reg);
}

static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			  u16 val)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_write(priv, addr, reg, val);
	else
		return priv->mmd_write(priv, addr, devad, reg, val);
}

static int mtk_mdio_register(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();
	int ret;

	if (!mdio_bus)
		return -ENOMEM;

	/* Assign MDIO access APIs according to the switch/phy */
	switch (priv->sw) {
	case SW_MT7530:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_ind_read;
		priv->mmd_write = mtk_mmd_ind_write;
		break;
	default:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_read;
		priv->mmd_write = mtk_mmd_write;
	}

	mdio_bus->read = mtk_mdio_read;
	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);

	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;

	return 0;
}

/*
 * MT7530 Internal Register Address Bits
 * --------------------------------------------------------
 * | 15 14 13 12 11 10  9  8  7  6 |  5  4  3  2 |  1  0  |
 * |-------------------------------|-------------|--------|
 * |         Page Address          | Reg Address | Unused |
 * --------------------------------------------------------
 */
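/*
 * For example, a switch register at offset 0x7800 is accessed as page
 * 0x1e0 (0x7800 >> 6, written to MII register 0x1f), with the low 16 bits
 * at MII register (0x7800 >> 2) & 0xf = 0x0 and the high 16 bits at MII
 * register 0x10.
 */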

static int mt7530_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt7530_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}

static int mt7530_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt7530_smi_addr, 0x10, data >> 16);
}

static void mt7530_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	u32 val;

	mt7530_reg_read(priv, reg, &val);
	val &= ~clr;
	val |= set;
	mt7530_reg_write(priv, reg, val);
}

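/*
 * MT7530 core (PLL/TRGMII clock) registers live behind the clause 45 MMD
 * space of the first internal PHY, device address 0x1f, and are written
 * through the indirect MMD helpers above.
 */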
static void mt7530_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, 0);

	mtk_mmd_ind_write(priv, phy_addr, 0x1f, reg, val);
}

static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500MHz */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt7530_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt7530_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt7530_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}

static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	int i;

	/* Select 250MHz clk for RGMII mode */
	mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
		       ETHSYS_TRGMII_CLK_SEL362_5, 0);

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt7530_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);
	mt7530_reg_write(priv, PCMR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt7530_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (1 << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt7530_reg_write(priv, PCMR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt7530_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0; i < NUM_TRGMII_CTRL; i++)
		mt7530_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (8 << TD_DM_DRVP_S) | (8 << TD_DM_DRVN_S));

	for (i = 0; i < NUM_TRGMII_CTRL; i++)
		mt7530_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Set port isolation */
	for (i = 0; i < 8; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt7530_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}

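/*
 * Translate the negotiated PHY state (speed, duplex, link and resolved
 * pause/asym-pause flow control) into a forced setting of the per-port MAC
 * control register, since the GMAC is always driven in force mode here.
 */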
static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = (1 << IPG_CFG_S) |
	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN;

	switch (priv->phydev->speed) {
	case SPEED_10:
		mcr |= (SPEED_10M << FORCE_SPD_S);
		break;
	case SPEED_100:
		mcr |= (SPEED_100M << FORCE_SPD_S);
		break;
	case SPEED_1000:
		mcr |= (SPEED_1000M << FORCE_SPD_S);
		break;
	}

	if (priv->phydev->link)
		mcr |= FORCE_LINK;

	if (priv->phydev->duplex) {
		mcr |= FORCE_DPX;

		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
}

static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	mtk_phy_link_adjust(priv);

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_SGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		mcr = (1 << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (priv->soc == SOC_MT7623) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}

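/*
 * Set up the TX/RX descriptor rings in the non-cached region: every TX
 * descriptor starts out CPU-owned (DDONE set) with its buffer pointer and
 * forward port pre-filled, every RX descriptor gets a buffer and the maximum
 * packet length, and the ring bases, sizes and initial indices are then
 * programmed into the PDMA (the RX CPU index points at the last descriptor,
 * leaving the whole RX ring available to the hardware).
 */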
static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	flush_dcache_range((u32)pkt_base, (u32)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}

static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}

static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}

static int mtk_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 macaddr_lsb, macaddr_msb;

	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		      ((u32)mac[4] << 8) | (u32)mac[5];

	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);

	return 0;
}

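/*
 * TX path: claim the next CPU-owned descriptor (DDONE still set), copy the
 * frame into its buffer, flush the cache over it, publish the length and
 * clear DDONE, then advance TX_CTX_IDX so the PDMA picks the descriptor up.
 */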
static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	void *pkt_base;

	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((u32)pkt_base, (u32)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}

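/*
 * RX path: a descriptor with DDONE set holds a received frame; the buffer is
 * invalidated and handed to the network stack, and free_pkt() later clears
 * the descriptor fields and moves RX_CRX_IDX forward to return the slot to
 * the hardware.
 */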
static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	uchar *pkt_base;
	u32 length;

	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
	pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
	invalidate_dcache_range((u32)pkt_base, (u32)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}

static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;

	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;

	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}

static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare for tx/rx rings */
	priv->tx_ring_noc = (struct pdma_txdesc *)
		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (struct pdma_rxdesc *)
		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return priv->switch_init(priv);
}

static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}

static int mtk_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	priv->soc = dev_get_driver_data(dev);

	pdata->iobase = devfdt_get_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	priv->ethsys_base = regmap_get_range(regmap, 0);
	if (!priv->ethsys_base) {
		dev_err(dev, "Unable to find ethsys\n");
		return -ENODEV;
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	/* Interface mode is required */
	str = dev_read_string(dev, "phy-mode");
	if (str) {
		pdata->phy_interface = phy_get_interface_by_name(str);
		priv->phy_interface = pdata->phy_interface;
	} else {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->mt7530_smi_addr = MT7530_DFL_SMI_ADDR;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -ENODEV;
		}
	}

	return 0;
}

static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.ofdata_to_platdata = mtk_eth_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};