// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 * Author: Mark Lee <mark-mc.lee@mediatek.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <malloc.h>
#include <miiphy.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <wait_bit.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/mdio.h>
#include <linux/mii.h>

#include "mtk_eth.h"

#define NUM_TX_DESC		24
#define NUM_RX_DESC		24
#define TX_TOTAL_BUF_SIZE	(NUM_TX_DESC * PKTSIZE_ALIGN)
#define RX_TOTAL_BUF_SIZE	(NUM_RX_DESC * PKTSIZE_ALIGN)
#define TOTAL_PKT_BUF_SIZE	(TX_TOTAL_BUF_SIZE + RX_TOTAL_BUF_SIZE)

#define MT7530_NUM_PHYS		5
#define MT7530_DFL_SMI_ADDR	31

#define MT7530_PHY_ADDR(base, addr) \
	(((base) + (addr)) & 0x1f)

#define GDMA_FWD_TO_CPU \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_PDMA << MYMAC_DP_S) | \
	(DP_PDMA << BC_DP_S) | \
	(DP_PDMA << MC_DP_S) | \
	(DP_PDMA << UN_DP_S))

#define GDMA_FWD_DISCARD \
	(0x20000000 | \
	GDM_ICS_EN | \
	GDM_TCS_EN | \
	GDM_UCS_EN | \
	STRP_CRC | \
	(DP_DISCARD << MYMAC_DP_S) | \
	(DP_DISCARD << BC_DP_S) | \
	(DP_DISCARD << MC_DP_S) | \
	(DP_DISCARD << UN_DP_S))

struct pdma_rxd_info1 {
	u32 PDP0;
};

struct pdma_rxd_info2 {
	u32 PLEN1 : 14;
	u32 LS1 : 1;
	u32 UN_USED : 1;
	u32 PLEN0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_rxd_info3 {
	u32 PDP1;
};

struct pdma_rxd_info4 {
	u32 FOE_ENTRY : 14;
	u32 CRSN : 5;
	u32 SP : 3;
	u32 L4F : 1;
	u32 L4VLD : 1;
	u32 TACK : 1;
	u32 IP4F : 1;
	u32 IP4 : 1;
	u32 IP6 : 1;
	u32 UN_USED : 4;
};

struct pdma_rxdesc {
	struct pdma_rxd_info1 rxd_info1;
	struct pdma_rxd_info2 rxd_info2;
	struct pdma_rxd_info3 rxd_info3;
	struct pdma_rxd_info4 rxd_info4;
};

struct pdma_txd_info1 {
	u32 SDP0;
};

struct pdma_txd_info2 {
	u32 SDL1 : 14;
	u32 LS1 : 1;
	u32 BURST : 1;
	u32 SDL0 : 14;
	u32 LS0 : 1;
	u32 DDONE : 1;
};

struct pdma_txd_info3 {
	u32 SDP1;
};

struct pdma_txd_info4 {
	u32 VLAN_TAG : 16;
	u32 INS : 1;
	u32 RESV : 2;
	u32 UDF : 6;
	u32 FPORT : 3;
	u32 TSO : 1;
	u32 TUI_CO : 3;
};

struct pdma_txdesc {
	struct pdma_txd_info1 txd_info1;
	struct pdma_txd_info2 txd_info2;
	struct pdma_txd_info3 txd_info3;
	struct pdma_txd_info4 txd_info4;
};

enum mtk_switch {
	SW_NONE,
	SW_MT7530
};

enum mtk_soc {
	SOC_MT7623,
	SOC_MT7629,
	SOC_MT7622
};

struct mtk_eth_priv {
	char pkt_pool[TOTAL_PKT_BUF_SIZE] __aligned(ARCH_DMA_MINALIGN);

	struct pdma_txdesc *tx_ring_noc;
	struct pdma_rxdesc *rx_ring_noc;

	int rx_dma_owner_idx0;
	int tx_cpu_owner_idx0;

	void __iomem *fe_base;
	void __iomem *gmac_base;
	void __iomem *ethsys_base;
	void __iomem *sgmii_base;

	struct mii_dev *mdio_bus;
	int (*mii_read)(struct mtk_eth_priv *priv, u8 phy, u8 reg);
	int (*mii_write)(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 val);
	int (*mmd_read)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg);
	int (*mmd_write)(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg,
			 u16 val);

	enum mtk_soc soc;
	int gmac_id;
	int force_mode;
	int speed;
	int duplex;

	struct phy_device *phydev;
	int phy_interface;
	int phy_addr;

	enum mtk_switch sw;
	int (*switch_init)(struct mtk_eth_priv *priv);
	u32 mt7530_smi_addr;
	u32 mt7530_phy_base;

	struct gpio_desc rst_gpio;
	int mcm;

	struct reset_ctl rst_fe;
	struct reset_ctl rst_mcm;
};

static void mtk_pdma_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->fe_base + PDMA_BASE + reg);
}

static void mtk_pdma_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			 u32 set)
{
	clrsetbits_le32(priv->fe_base + PDMA_BASE + reg, clr, set);
}

static void mtk_gdma_write(struct mtk_eth_priv *priv, int no, u32 reg,
			   u32 val)
{
	u32 gdma_base;

	if (no == 1)
		gdma_base = GDMA2_BASE;
	else
		gdma_base = GDMA1_BASE;

	writel(val, priv->fe_base + gdma_base + reg);
}

static u32 mtk_gmac_read(struct mtk_eth_priv *priv, u32 reg)
{
	return readl(priv->gmac_base + reg);
}

static void mtk_gmac_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	writel(val, priv->gmac_base + reg);
}

static void mtk_gmac_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr, u32 set)
{
	clrsetbits_le32(priv->gmac_base + reg, clr, set);
}

static void mtk_ethsys_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	clrsetbits_le32(priv->ethsys_base + reg, clr, set);
}

/* Direct MDIO clause 22/45 access via SoC */
static int mtk_mii_rw(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data,
		      u32 cmd, u32 st)
{
	int ret;
	u32 val;

	val = (st << MDIO_ST_S) |
	      ((cmd << MDIO_CMD_S) & MDIO_CMD_M) |
	      (((u32)phy << MDIO_PHY_ADDR_S) & MDIO_PHY_ADDR_M) |
	      (((u32)reg << MDIO_REG_ADDR_S) & MDIO_REG_ADDR_M);

	if (cmd == MDIO_CMD_WRITE)
		val |= data & MDIO_RW_DATA_M;

	mtk_gmac_write(priv, GMAC_PIAC_REG, val | PHY_ACS_ST);

	ret = wait_for_bit_le32(priv->gmac_base + GMAC_PIAC_REG,
				PHY_ACS_ST, 0, 5000, 0);
	if (ret) {
		pr_warn("MDIO access timeout\n");
		return ret;
	}

	if (cmd == MDIO_CMD_READ) {
		val = mtk_gmac_read(priv, GMAC_PIAC_REG);
		return val & MDIO_RW_DATA_M;
	}

	return 0;
}

/* Direct MDIO clause 22 read via SoC */
static int mtk_mii_read(struct mtk_eth_priv *priv, u8 phy, u8 reg)
{
	return mtk_mii_rw(priv, phy, reg, 0, MDIO_CMD_READ, MDIO_ST_C22);
}

/* Direct MDIO clause 22 write via SoC */
static int mtk_mii_write(struct mtk_eth_priv *priv, u8 phy, u8 reg, u16 data)
{
	return mtk_mii_rw(priv, phy, reg, data, MDIO_CMD_WRITE, MDIO_ST_C22);
}

/* Direct MDIO clause 45 read via SoC */
static int mtk_mmd_read(struct mtk_eth_priv *priv, u8 addr, u8 devad, u16 reg)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, 0, MDIO_CMD_READ_C45,
			  MDIO_ST_C45);
}

/* Direct MDIO clause 45 write via SoC */
static int mtk_mmd_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			 u16 reg, u16 val)
{
	int ret;

	ret = mtk_mii_rw(priv, addr, devad, reg, MDIO_CMD_ADDR, MDIO_ST_C45);
	if (ret)
		return ret;

	return mtk_mii_rw(priv, addr, devad, val, MDIO_CMD_WRITE,
			  MDIO_ST_C45);
}

/* Indirect MDIO clause 45 read via MII registers */
static int mtk_mmd_ind_read(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			    u16 reg)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_read(priv, addr, MII_MMD_ADDR_DATA_REG);
}

/* Indirect MDIO clause 45 write via MII registers */
static int mtk_mmd_ind_write(struct mtk_eth_priv *priv, u8 addr, u8 devad,
			     u16 reg, u16 val)
{
	int ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_ADDR << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, reg);
	if (ret)
		return ret;

	ret = priv->mii_write(priv, addr, MII_MMD_ACC_CTL_REG,
			      (MMD_DATA << MMD_CMD_S) |
			      ((devad << MMD_DEVAD_S) & MMD_DEVAD_M));
	if (ret)
		return ret;

	return priv->mii_write(priv, addr, MII_MMD_ADDR_DATA_REG, val);
}

static int mtk_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_read(priv, addr, reg);
	else
		return priv->mmd_read(priv, addr, devad, reg);
}

static int mtk_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			  u16 val)
{
	struct mtk_eth_priv *priv = bus->priv;

	if (devad < 0)
		return priv->mii_write(priv, addr, reg, val);
	else
		return priv->mmd_write(priv, addr, devad, reg, val);
}

static int mtk_mdio_register(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct mii_dev *mdio_bus = mdio_alloc();
	int ret;

	if (!mdio_bus)
		return -ENOMEM;

	/* Assign MDIO access APIs according to the switch/phy */
	switch (priv->sw) {
	case SW_MT7530:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_ind_read;
		priv->mmd_write = mtk_mmd_ind_write;
		break;
	default:
		priv->mii_read = mtk_mii_read;
		priv->mii_write = mtk_mii_write;
		priv->mmd_read = mtk_mmd_read;
		priv->mmd_write = mtk_mmd_write;
	}

	mdio_bus->read = mtk_mdio_read;
	mdio_bus->write = mtk_mdio_write;
	snprintf(mdio_bus->name, sizeof(mdio_bus->name), "%s", dev->name);

	mdio_bus->priv = (void *)priv;

	ret = mdio_register(mdio_bus);

	if (ret)
		return ret;

	priv->mdio_bus = mdio_bus;

	return 0;
}

/*
 * MT7530 Internal Register Address Bits
 * -------------------------------------------------------------------
 * | 15  14  13  12  11  10   9   8   7   6 | 5   4   3   2 | 1   0  |
 * |----------------------------------------|---------------|--------|
 * |              Page Address              |  Reg Address  | Unused |
 * -------------------------------------------------------------------
 */

static int mt7530_reg_read(struct mtk_eth_priv *priv, u32 reg, u32 *data)
{
	int ret, low_word, high_word;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Read low word */
	low_word = mtk_mii_read(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf);
	if (low_word < 0)
		return low_word;

	/* Read high word */
	high_word = mtk_mii_read(priv, priv->mt7530_smi_addr, 0x10);
	if (high_word < 0)
		return high_word;

	if (data)
		*data = ((u32)high_word << 16) | (low_word & 0xffff);

	return 0;
}

static int mt7530_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 data)
{
	int ret;

	/* Write page address */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, 0x1f, reg >> 6);
	if (ret)
		return ret;

	/* Write low word */
	ret = mtk_mii_write(priv, priv->mt7530_smi_addr, (reg >> 2) & 0xf,
			    data & 0xffff);
	if (ret)
		return ret;

	/* Write high word */
	return mtk_mii_write(priv, priv->mt7530_smi_addr, 0x10, data >> 16);
}

static void mt7530_reg_rmw(struct mtk_eth_priv *priv, u32 reg, u32 clr,
			   u32 set)
{
	u32 val;

	mt7530_reg_read(priv, reg, &val);
	val &= ~clr;
	val |= set;
	mt7530_reg_write(priv, reg, val);
}

static void mt7530_core_reg_write(struct mtk_eth_priv *priv, u32 reg, u32 val)
{
	u8 phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, 0);

	mtk_mmd_ind_write(priv, phy_addr, 0x1f, reg, val);
}

static int mt7530_pad_clk_setup(struct mtk_eth_priv *priv, int mode)
{
	u32 ncpo1, ssc_delta;

	switch (mode) {
	case PHY_INTERFACE_MODE_RGMII:
		ncpo1 = 0x0c80;
		ssc_delta = 0x87;
		break;
	default:
		printf("error: xMII mode %d not supported\n", mode);
		return -EINVAL;
	}

	/* Disable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, 0);

	/* Disable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S));

	/* For MT7530 core clock = 500Mhz */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP2,
			      (1 << RG_GSWPLL_POSDIV_500M_S) |
			      (25 << RG_GSWPLL_FBKDIV_500M_S));

	/* Enable MT7530 PLL */
	mt7530_core_reg_write(priv, CORE_GSWPLL_GRP1,
			      (2 << RG_GSWPLL_POSDIV_200M_S) |
			      (32 << RG_GSWPLL_FBKDIV_200M_S) |
			      RG_GSWPLL_EN_PRE);

	udelay(20);

	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);

	/* Setup the MT7530 TRGMII Tx Clock */
	mt7530_core_reg_write(priv, CORE_PLL_GROUP5, ncpo1);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP6, 0);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP10, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP11, ssc_delta);
	mt7530_core_reg_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
			      RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);

	mt7530_core_reg_write(priv, CORE_PLL_GROUP2,
			      RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
			      (1 << RG_SYSPLL_POSDIV_S));

	mt7530_core_reg_write(priv, CORE_PLL_GROUP7,
			      RG_LCDDS_PCW_NCPO_CHG | (3 << RG_LCCDS_C_S) |
			      RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

	/* Enable MT7530 core clock */
	mt7530_core_reg_write(priv, CORE_TRGMII_GSW_CLK_CG,
			      REG_GSWCK_EN | REG_TRGMIICK_EN);

	return 0;
}

static int mt7530_setup(struct mtk_eth_priv *priv)
{
	u16 phy_addr, phy_val;
	u32 val;
	int i;

	/* Select 250MHz clk for RGMII mode */
	mtk_ethsys_rmw(priv, ETHSYS_CLKCFG0_REG,
		       ETHSYS_TRGMII_CLK_SEL362_5, 0);

	/* Global reset switch */
	if (priv->mcm) {
		reset_assert(&priv->rst_mcm);
		udelay(1000);
		reset_deassert(&priv->rst_mcm);
		mdelay(1000);
	} else if (dm_gpio_is_valid(&priv->rst_gpio)) {
		dm_gpio_set_value(&priv->rst_gpio, 0);
		udelay(1000);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		mdelay(1000);
	}

	/* Modify HWTRAP first to allow direct access to internal PHYs */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP;
	val &= ~C_MDIO_BPS;
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Calculate the phy base address */
	val = ((val & SMI_ADDR_M) >> SMI_ADDR_S) << 3;
	priv->mt7530_phy_base = (val | 0x7) + 1;

	/* Turn off PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val |= BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Force MAC link down before reset */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);
	mt7530_reg_write(priv, PCMR_REG(6), FORCE_MODE);

	/* MT7530 reset */
	mt7530_reg_write(priv, SYS_CTRL_REG, SW_SYS_RST | SW_REG_RST);
	udelay(100);

	val = (1 << IPG_CFG_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN |
	      (SPEED_1000M << FORCE_SPD_S) |
	      FORCE_DPX | FORCE_LINK;

	/* MT7530 Port6: Forced 1000M/FD, FC disabled */
	mt7530_reg_write(priv, PCMR_REG(6), val);

	/* MT7530 Port5: Forced link down */
	mt7530_reg_write(priv, PCMR_REG(5), FORCE_MODE);

	/* MT7530 Port6: Set to RGMII */
	mt7530_reg_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_M, P6_INTF_MODE_RGMII);

	/* Hardware Trap: Enable Port6, Disable Port5 */
	mt7530_reg_read(priv, HWTRAP_REG, &val);
	val |= CHG_TRAP | LOOPDET_DIS | P5_INTF_DIS |
	       (P5_INTF_SEL_GMAC5 << P5_INTF_SEL_S) |
	       (P5_INTF_MODE_RGMII << P5_INTF_MODE_S);
	val &= ~(C_MDIO_BPS | P6_INTF_DIS);
	mt7530_reg_write(priv, MHWTRAP_REG, val);

	/* Setup switch core pll */
	mt7530_pad_clk_setup(priv, priv->phy_interface);

	/* Lower Tx Driving for TRGMII path */
	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
		mt7530_reg_write(priv, MT7530_TRGMII_TD_ODT(i),
				 (8 << TD_DM_DRVP_S) | (8 << TD_DM_DRVN_S));

	for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
		mt7530_reg_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_M, 16);

	/* Turn on PHYs */
	for (i = 0; i < MT7530_NUM_PHYS; i++) {
		phy_addr = MT7530_PHY_ADDR(priv->mt7530_phy_base, i);
		phy_val = priv->mii_read(priv, phy_addr, MII_BMCR);
		phy_val &= ~BMCR_PDOWN;
		priv->mii_write(priv, phy_addr, MII_BMCR, phy_val);
	}

	/* Set port isolation */
	for (i = 0; i < 8; i++) {
		/* Set port matrix mode */
		if (i != 6)
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x40 << PORT_MATRIX_S));
		else
			mt7530_reg_write(priv, PCR_REG(i),
					 (0x3f << PORT_MATRIX_S));

		/* Set port mode to user port */
		mt7530_reg_write(priv, PVC_REG(i),
				 (0x8100 << STAG_VPID_S) |
				 (VLAN_ATTR_USER << VLAN_ATTR_S));
	}

	return 0;
}

static void mtk_phy_link_adjust(struct mtk_eth_priv *priv)
{
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr;

	mcr = (1 << IPG_CFG_S) |
	      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
	      MAC_MODE | FORCE_MODE |
	      MAC_TX_EN | MAC_RX_EN |
	      BKOFF_EN | BACKPR_EN;

	switch (priv->phydev->speed) {
	case SPEED_10:
		mcr |= (SPEED_10M << FORCE_SPD_S);
		break;
	case SPEED_100:
		mcr |= (SPEED_100M << FORCE_SPD_S);
		break;
	case SPEED_1000:
		mcr |= (SPEED_1000M << FORCE_SPD_S);
		break;
	}

	if (priv->phydev->link)
		mcr |= FORCE_LINK;

	if (priv->phydev->duplex) {
		mcr |= FORCE_DPX;

		if (priv->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (priv->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (priv->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (priv->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= FORCE_RX_FC;

		debug("rx pause %s, tx pause %s\n",
		      flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		      flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
}

static int mtk_phy_start(struct mtk_eth_priv *priv)
{
	struct phy_device *phydev = priv->phydev;
	int ret;

	ret = phy_startup(phydev);

	if (ret) {
		debug("Could not initialize PHY %s\n", phydev->dev->name);
		return ret;
	}

	if (!phydev->link) {
		debug("%s: link down.\n", phydev->dev->name);
		return 0;
	}

	mtk_phy_link_adjust(priv);

	debug("Speed: %d, %s duplex%s\n", phydev->speed,
	      (phydev->duplex) ? "full" : "half",
	      (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static int mtk_phy_probe(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct phy_device *phydev;

	phydev = phy_connect(priv->mdio_bus, priv->phy_addr, dev,
			     priv->phy_interface);
	if (!phydev)
		return -ENODEV;

	phydev->supported &= PHY_GBIT_FEATURES;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static void mtk_sgmii_init(struct mtk_eth_priv *priv)
{
	/* Set SGMII GEN2 speed(2.5G) */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_GEN2_SPEED,
			SGMSYS_SPEED_2500, SGMSYS_SPEED_2500);

	/* Disable SGMII AN */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_PCS_CONTROL_1,
			SGMII_AN_ENABLE, 0);

	/* SGMII force mode setting */
	writel(SGMII_FORCE_MODE, priv->sgmii_base + SGMSYS_SGMII_MODE);

	/* Release PHYA power down state */
	clrsetbits_le32(priv->sgmii_base + SGMSYS_QPHY_PWR_STATE_CTRL,
			SGMII_PHYA_PWD, 0);
}

static void mtk_mac_init(struct mtk_eth_priv *priv)
{
	int i, ge_mode = 0;
	u32 mcr;

	switch (priv->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII:
		ge_mode = GE_MODE_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		ge_mode = GE_MODE_RGMII;
		mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG, SYSCFG0_SGMII_SEL_M,
			       SYSCFG0_SGMII_SEL(priv->gmac_id));
		mtk_sgmii_init(priv);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		ge_mode = GE_MODE_MII;
		break;
	case PHY_INTERFACE_MODE_RMII:
		ge_mode = GE_MODE_RMII;
		break;
	default:
		break;
	}

	/* set the gmac to the right mode */
	mtk_ethsys_rmw(priv, ETHSYS_SYSCFG0_REG,
		       SYSCFG0_GE_MODE_M << SYSCFG0_GE_MODE_S(priv->gmac_id),
		       ge_mode << SYSCFG0_GE_MODE_S(priv->gmac_id));

	if (priv->force_mode) {
		mcr = (1 << IPG_CFG_S) |
		      (MAC_RX_PKT_LEN_1536 << MAC_RX_PKT_LEN_S) |
		      MAC_MODE | FORCE_MODE |
		      MAC_TX_EN | MAC_RX_EN |
		      BKOFF_EN | BACKPR_EN |
		      FORCE_LINK;

		switch (priv->speed) {
		case SPEED_10:
			mcr |= SPEED_10M << FORCE_SPD_S;
			break;
		case SPEED_100:
			mcr |= SPEED_100M << FORCE_SPD_S;
			break;
		case SPEED_1000:
			mcr |= SPEED_1000M << FORCE_SPD_S;
			break;
		}

		if (priv->duplex)
			mcr |= FORCE_DPX;

		mtk_gmac_write(priv, GMAC_PORT_MCR(priv->gmac_id), mcr);
	}

	if (priv->soc == SOC_MT7623) {
		/* Lower Tx Driving for TRGMII path */
		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
			mtk_gmac_write(priv, GMAC_TRGMII_TD_ODT(i),
				       (8 << TD_DM_DRVP_S) |
				       (8 << TD_DM_DRVN_S));

		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, 0,
			     RX_RST | RXC_DQSISEL);
		mtk_gmac_rmw(priv, GMAC_TRGMII_RCK_CTRL, RX_RST, 0);
	}
}

static void mtk_eth_fifo_init(struct mtk_eth_priv *priv)
{
	char *pkt_base = priv->pkt_pool;
	int i;

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0xffff0000, 0);
	udelay(500);

	memset(priv->tx_ring_noc, 0, NUM_TX_DESC * sizeof(struct pdma_txdesc));
	memset(priv->rx_ring_noc, 0, NUM_RX_DESC * sizeof(struct pdma_rxdesc));
	memset(priv->pkt_pool, 0, TOTAL_PKT_BUF_SIZE);

	flush_dcache_range((ulong)pkt_base,
			   (ulong)(pkt_base + TOTAL_PKT_BUF_SIZE));

	priv->rx_dma_owner_idx0 = 0;
	priv->tx_cpu_owner_idx0 = 0;

	for (i = 0; i < NUM_TX_DESC; i++) {
		priv->tx_ring_noc[i].txd_info2.LS0 = 1;
		priv->tx_ring_noc[i].txd_info2.DDONE = 1;
		priv->tx_ring_noc[i].txd_info4.FPORT = priv->gmac_id + 1;

		priv->tx_ring_noc[i].txd_info1.SDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	for (i = 0; i < NUM_RX_DESC; i++) {
		priv->rx_ring_noc[i].rxd_info2.PLEN0 = PKTSIZE_ALIGN;
		priv->rx_ring_noc[i].rxd_info1.PDP0 = virt_to_phys(pkt_base);
		pkt_base += PKTSIZE_ALIGN;
	}

	mtk_pdma_write(priv, TX_BASE_PTR_REG(0),
		       virt_to_phys(priv->tx_ring_noc));
	mtk_pdma_write(priv, TX_MAX_CNT_REG(0), NUM_TX_DESC);
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	mtk_pdma_write(priv, RX_BASE_PTR_REG(0),
		       virt_to_phys(priv->rx_ring_noc));
	mtk_pdma_write(priv, RX_MAX_CNT_REG(0), NUM_RX_DESC);
	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), NUM_RX_DESC - 1);

	mtk_pdma_write(priv, PDMA_RST_IDX_REG, RST_DTX_IDX0 | RST_DRX_IDX0);
}

static int mtk_eth_start(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* Reset FE */
	reset_assert(&priv->rst_fe);
	udelay(1000);
	reset_deassert(&priv->rst_fe);
	mdelay(10);

	/* Packets forward to PDMA */
	mtk_gdma_write(priv, priv->gmac_id, GDMA_IG_CTRL_REG, GDMA_FWD_TO_CPU);

	if (priv->gmac_id == 0)
		mtk_gdma_write(priv, 1, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);
	else
		mtk_gdma_write(priv, 0, GDMA_IG_CTRL_REG, GDMA_FWD_DISCARD);

	udelay(500);

	mtk_eth_fifo_init(priv);

	/* Start PHY */
	if (priv->sw == SW_NONE) {
		ret = mtk_phy_start(priv);
		if (ret)
			return ret;
	}

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG, 0,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
	udelay(500);

	return 0;
}

static void mtk_eth_stop(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	mtk_pdma_rmw(priv, PDMA_GLO_CFG_REG,
		     TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN, 0);
	udelay(500);

	wait_for_bit_le32(priv->fe_base + PDMA_BASE + PDMA_GLO_CFG_REG,
			  RX_DMA_BUSY | TX_DMA_BUSY, 0, 5000, 0);
}

static int mtk_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 macaddr_lsb, macaddr_msb;

	macaddr_msb = ((u32)mac[0] << 8) | (u32)mac[1];
	macaddr_lsb = ((u32)mac[2] << 24) | ((u32)mac[3] << 16) |
		      ((u32)mac[4] << 8) | (u32)mac[5];

	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_MSB_REG, macaddr_msb);
	mtk_gdma_write(priv, priv->gmac_id, GDMA_MAC_LSB_REG, macaddr_lsb);

	return 0;
}

static int mtk_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->tx_cpu_owner_idx0;
	void *pkt_base;

	if (!priv->tx_ring_noc[idx].txd_info2.DDONE) {
		debug("mtk-eth: TX DMA descriptor ring is full\n");
		return -EPERM;
	}

	pkt_base = (void *)phys_to_virt(priv->tx_ring_noc[idx].txd_info1.SDP0);
	memcpy(pkt_base, packet, length);
	flush_dcache_range((ulong)pkt_base, (ulong)pkt_base +
			   roundup(length, ARCH_DMA_MINALIGN));

	priv->tx_ring_noc[idx].txd_info2.SDL0 = length;
	priv->tx_ring_noc[idx].txd_info2.DDONE = 0;

	priv->tx_cpu_owner_idx0 = (priv->tx_cpu_owner_idx0 + 1) % NUM_TX_DESC;
	mtk_pdma_write(priv, TX_CTX_IDX_REG(0), priv->tx_cpu_owner_idx0);

	return 0;
}

static int mtk_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;
	uchar *pkt_base;
	u32 length;

	if (!priv->rx_ring_noc[idx].rxd_info2.DDONE) {
		debug("mtk-eth: RX DMA descriptor ring is empty\n");
		return -EAGAIN;
	}

	length = priv->rx_ring_noc[idx].rxd_info2.PLEN0;
	pkt_base = (void *)phys_to_virt(priv->rx_ring_noc[idx].rxd_info1.PDP0);
	invalidate_dcache_range((ulong)pkt_base, (ulong)pkt_base +
				roundup(length, ARCH_DMA_MINALIGN));

	if (packetp)
		*packetp = pkt_base;

	return length;
}

static int mtk_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	u32 idx = priv->rx_dma_owner_idx0;

	priv->rx_ring_noc[idx].rxd_info2.DDONE = 0;
	priv->rx_ring_noc[idx].rxd_info2.LS0 = 0;
	priv->rx_ring_noc[idx].rxd_info2.PLEN0 = PKTSIZE_ALIGN;

	mtk_pdma_write(priv, RX_CRX_IDX_REG(0), idx);
	priv->rx_dma_owner_idx0 = (priv->rx_dma_owner_idx0 + 1) % NUM_RX_DESC;

	return 0;
}

static int mtk_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	ulong iobase = pdata->iobase;
	int ret;

	/* Frame Engine Register Base */
	priv->fe_base = (void *)iobase;

	/* GMAC Register Base */
	priv->gmac_base = (void *)(iobase + GMAC_BASE);

	/* MDIO register */
	ret = mtk_mdio_register(dev);
	if (ret)
		return ret;

	/* Prepare for tx/rx rings */
	priv->tx_ring_noc = (struct pdma_txdesc *)
		noncached_alloc(sizeof(struct pdma_txdesc) * NUM_TX_DESC,
				ARCH_DMA_MINALIGN);
	priv->rx_ring_noc = (struct pdma_rxdesc *)
		noncached_alloc(sizeof(struct pdma_rxdesc) * NUM_RX_DESC,
				ARCH_DMA_MINALIGN);

	/* Set MAC mode */
	mtk_mac_init(priv);

	/* Probe phy if switch is not specified */
	if (priv->sw == SW_NONE)
		return mtk_phy_probe(dev);

	/* Initialize switch */
	return priv->switch_init(priv);
}

static int mtk_eth_remove(struct udevice *dev)
{
	struct mtk_eth_priv *priv = dev_get_priv(dev);

	/* MDIO unregister */
	mdio_unregister(priv->mdio_bus);
	mdio_free(priv->mdio_bus);

	/* Stop possibly started DMA */
	mtk_eth_stop(dev);

	return 0;
}

static int mtk_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mtk_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args args;
	struct regmap *regmap;
	const char *str;
	ofnode subnode;
	int ret;

	priv->soc = dev_get_driver_data(dev);

	pdata->iobase = devfdt_get_addr(dev);

	/* get corresponding ethsys phandle */
	ret = dev_read_phandle_with_args(dev, "mediatek,ethsys", NULL, 0, 0,
					 &args);
	if (ret)
		return ret;

	regmap = syscon_node_to_regmap(args.node);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	priv->ethsys_base = regmap_get_range(regmap, 0);
	if (!priv->ethsys_base) {
		dev_err(dev, "Unable to find ethsys\n");
		return -ENODEV;
	}

	/* Reset controllers */
	ret = reset_get_by_name(dev, "fe", &priv->rst_fe);
	if (ret) {
		printf("error: Unable to get reset ctrl for frame engine\n");
		return ret;
	}

	priv->gmac_id = dev_read_u32_default(dev, "mediatek,gmac-id", 0);

	/* Interface mode is required */
	str = dev_read_string(dev, "phy-mode");
	if (str) {
		pdata->phy_interface = phy_get_interface_by_name(str);
		priv->phy_interface = pdata->phy_interface;
	} else {
		printf("error: phy-mode is not set\n");
		return -EINVAL;
	}

	/* Force mode or autoneg */
	subnode = ofnode_find_subnode(dev_ofnode(dev), "fixed-link");
	if (ofnode_valid(subnode)) {
		priv->force_mode = 1;
		priv->speed = ofnode_read_u32_default(subnode, "speed", 0);
		priv->duplex = ofnode_read_bool(subnode, "full-duplex");

		if (priv->speed != SPEED_10 && priv->speed != SPEED_100 &&
		    priv->speed != SPEED_1000) {
			printf("error: no valid speed set in fixed-link\n");
			return -EINVAL;
		}
	}

	if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* get corresponding sgmii phandle */
		ret = dev_read_phandle_with_args(dev, "mediatek,sgmiisys",
						 NULL, 0, 0, &args);
		if (ret)
			return ret;

		regmap = syscon_node_to_regmap(args.node);

		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		priv->sgmii_base = regmap_get_range(regmap, 0);

		if (!priv->sgmii_base) {
			dev_err(dev, "Unable to find sgmii\n");
			return -ENODEV;
		}
	}

	/* check for switch first, otherwise phy will be used */
	priv->sw = SW_NONE;
	priv->switch_init = NULL;
	str = dev_read_string(dev, "mediatek,switch");

	if (str) {
		if (!strcmp(str, "mt7530")) {
			priv->sw = SW_MT7530;
			priv->switch_init = mt7530_setup;
			priv->mt7530_smi_addr = MT7530_DFL_SMI_ADDR;
		} else {
			printf("error: unsupported switch\n");
			return -EINVAL;
		}

		priv->mcm = dev_read_bool(dev, "mediatek,mcm");
		if (priv->mcm) {
			ret = reset_get_by_name(dev, "mcm", &priv->rst_mcm);
			if (ret) {
				printf("error: no reset ctrl for mcm\n");
				return ret;
			}
		} else {
			gpio_request_by_name(dev, "reset-gpios", 0,
					     &priv->rst_gpio, GPIOD_IS_OUT);
		}
	} else {
		ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0,
						 0, &args);
		if (ret) {
			printf("error: phy-handle is not specified\n");
			return ret;
		}

		priv->phy_addr = ofnode_read_s32_default(args.node, "reg", -1);
		if (priv->phy_addr < 0) {
			printf("error: phy address is not specified\n");
			return -ENODEV;
		}
	}

	return 0;
}

static const struct udevice_id mtk_eth_ids[] = {
	{ .compatible = "mediatek,mt7629-eth", .data = SOC_MT7629 },
	{ .compatible = "mediatek,mt7623-eth", .data = SOC_MT7623 },
	{ .compatible = "mediatek,mt7622-eth", .data = SOC_MT7622 },
	{}
};

static const struct eth_ops mtk_eth_ops = {
	.start = mtk_eth_start,
	.stop = mtk_eth_stop,
	.send = mtk_eth_send,
	.recv = mtk_eth_recv,
	.free_pkt = mtk_eth_free_pkt,
	.write_hwaddr = mtk_eth_write_hwaddr,
};

U_BOOT_DRIVER(mtk_eth) = {
	.name = "mtk-eth",
	.id = UCLASS_ETH,
	.of_match = mtk_eth_ids,
	.ofdata_to_platdata = mtk_eth_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.probe = mtk_eth_probe,
	.remove = mtk_eth_remove,
	.ops = &mtk_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mtk_eth_priv),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};