// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers'
 * copyrights here, so I can't add them here.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Ethernet frame engine register */
#define PDMA_RELATED		0x0800

#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC address MSB */

#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch register */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_PVIDC0	0x0040
#define MT7628_SWITCH_PVIDC1	0x0044
#define MT7628_SWITCH_PVIDC2	0x0048
#define MT7628_SWITCH_PVIDC3	0x004c
#define MT7628_SWITCH_VMSC0	0x0070
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)

struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);
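
/*
 * RX descriptor ownership: the DMA engine sets RX_DMA_DONE in rxd2 once
 * it has written a received frame to the buffer at rxd1 and stored the
 * frame length in RX_DMA_PLEN0. The driver returns a consumed descriptor
 * to hardware by rewriting rxd2 (see mt7628_eth_recv() and
 * mt7628_eth_free_pkt() below).
 */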

#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);
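
/*
 * TX descriptor ownership: the driver waits for TX_DMA_DONE in txd2
 * before reusing a descriptor, fills in the buffer address (txd1) and
 * length (TX_DMA_PLEN0), clears TX_DMA_DONE and advances TX_CTX_IDX0 to
 * hand the descriptor back to the DMA engine (see mt7628_eth_send()
 * below).
 */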

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

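/* Polling timeouts in milliseconds, passed to wait_for_bit_le32() */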
#define CFG_MDIO_TIMEOUT	100
#define CFG_DMA_STOP_TIMEOUT	100
#define CFG_TX_DMA_TIMEOUT	100

struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;

	struct fe_tx_dma *tx_ring;
	struct fe_rx_dma *rx_ring;

	u8 *rx_buf[NUM_RX_DESC];

	/* Index of the next RXD in RXD ring 0 that the DMA engine will use */
	int rx_dma_idx;
	/* Index of the next TXD in TXD ring 0 that the CPU will use */
	int tx_dma_idx;

	struct reset_ctl rst_ephy;

	struct phy_device *phy;

	int wan_port;
};

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);

static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
{
	void __iomem *base = priv->eth_sw_base;
	int ret;

	ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
				CFG_MDIO_TIMEOUT, false);
	if (ret) {
		printf("MDIO operation timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

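/*
 * Indirect PHY register access through the switch: a command is written
 * to PCR0 (PHY address, register number, read/write command and, for
 * writes, the data), then PCR1 is polled until the switch signals
 * completion via PCR1_RD_RDY/PCR1_WT_DONE. Read data is returned in the
 * upper half of PCR1.
 */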
static int mii_mgr_read(struct mt7628_eth_dev *priv,
			u32 phy_addr, u32 phy_register, u32 *read_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 status = 0;
	int ret;

	*read_data = 0xffff;
	/* Make sure previous read operation is complete */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
	if (ret)
		return ret;

	writel(PCR0_RD_PHY_CMD |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
	       base + MT7628_SWITCH_PCR0);

	/* Wait for the current read operation to complete */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
	if (ret)
		return ret;

	status = readl(base + MT7628_SWITCH_PCR1);
	*read_data = FIELD_GET(PCR1_RD_DATA, status);

	return 0;
}

static int mii_mgr_write(struct mt7628_eth_dev *priv,
			 u32 phy_addr, u32 phy_register, u32 write_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 data;
	int ret;

	/* Make sure previous write operation is complete */
	ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
	if (ret)
		return ret;

	data = FIELD_PREP(PCR0_WT_DATA, write_data) |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
	       PCR0_WT_PHY_CMD;
	writel(data, base + MT7628_SWITCH_PCR0);

	return mdio_wait_read(priv, PCR1_WT_DONE, true);
}

static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u32 val;
	int ret;

	ret = mii_mgr_read(bus->priv, addr, reg, &val);
	if (ret)
		return ret;

	return val;
}

static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	return mii_mgr_write(bus->priv, addr, reg, value);
}

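/*
 * mt7628_ephy_init() programs the embedded PHYs through paged,
 * vendor-specific registers. The values below are carried over unchanged
 * from the MediaTek reference code named in the header; beyond the page
 * switches noted inline, their individual meaning is not documented
 * there.
 */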
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i, 0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}

static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;
	void __iomem *reg;
	u32 val = 0, pvid;
	int i;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125MHz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* LAN/WAN partition; the WAN port is unusable for U-Boot networking */
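	/*
	 * Each PVIDC register holds the 12-bit PVIDs of two ports: the WAN
	 * port is placed in VLAN 2, all other ports in VLAN 1. VMSC0 is
	 * then adjusted so that the WAN port leaves the first (LAN) VLAN
	 * member set and joins the second.
	 */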
	if (priv->wan_port >= 0 && priv->wan_port < 6) {
		for (i = 0; i < 8; i++) {
			pvid = i == priv->wan_port ? 2 : 1;
			reg = base + MT7628_SWITCH_PVIDC0 + (i / 2) * 4;
			if (i % 2 == 0) {
				val = pvid;
			} else {
				val |= (pvid << 12);
				writel(val, reg);
			}
		}

		val = 0xffff407f;
		val |= 1 << (8 + priv->wan_port);
		val &= ~(1 << priv->wan_port);
		writel(val, base + MT7628_SWITCH_VMSC0);
	}

	/* Reset PHY */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}

static void eth_dma_start(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;

	setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
}

static void eth_dma_stop(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;
	int ret;

	clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);

	/* Wait for DMA to stop */
	ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
				RX_DMA_BUSY | TX_DMA_BUSY, false,
				CFG_DMA_STOP_TIMEOUT, false);
	if (ret)
		printf("DMA stop timeout error!\n");
}

static int mt7628_eth_write_hwaddr(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	u8 *addr = ((struct eth_pdata *)dev_get_plat(dev))->enetaddr;
	u32 val;

	/* Set MAC address. */
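	/* SDM_MAC_ADRH holds the two high bytes, SDM_MAC_ADRL the low four */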
	val = addr[0];
	val = (val << 8) | addr[1];
	writel(val, base + SDM_MAC_ADRH);

	val = addr[2];
	val = (val << 8) | addr[3];
	val = (val << 8) | addr[4];
	val = (val << 8) | addr[5];
	writel(val, base + SDM_MAC_ADRL);

	return 0;
}

static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad to the minimum Ethernet frame length (60 bytes, FCS excluded) */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if buffer is ready for next TX DMA */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				CFG_TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

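	/* Write the frame back to DRAM so the DMA engine sees current data */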
	flush_dcache_range((u32)packet, (u32)packet + length);

	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/* Make sure the descriptor writes complete before kicking the DMA */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}

static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
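	/* Invalidate the cached view so the CPU reads the DMA-written data */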
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the descriptor updates are visible before continuing */
	wmb();

	return length;
}

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int idx;

	idx = priv->rx_dma_idx;

	/* Return the just-consumed RXD to the DMA engine */
	writel(idx, base + RX_CALC_IDX0);

	/* Advance to the next RXD to be received */
	idx = (idx + 1) % NUM_RX_DESC;

	priv->rx_dma_idx = idx;

	return 0;
}

static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the descriptor initialization is visible to the DMA */
	wmb();

	/* Disable the delayed interrupts */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located. */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

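	/* Point RX_CALC_IDX0 at the last RXD so the whole ring is DMA-owned */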
	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the ring setup is complete before enabling the DMA */
	wmb();
	eth_dma_start(priv);

	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received Ethernet
	 * packets in some FIFO. Let's read the already queued packets
	 * out by using the receive routine, so that these old messages
	 * are dropped before the new transfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}

static void mt7628_eth_stop(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);

	eth_dma_stop(priv);
}

static int mt7628_eth_probe(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	int poll_link_phy;
	int ret;
	int i;

	/* Save frame-engine base address for later use */
	priv->base = dev_remap_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Save switch base address for later use */
	priv->eth_sw_base = dev_remap_addr_index(dev, 1);
	if (IS_ERR(priv->eth_sw_base))
		return PTR_ERR(priv->eth_sw_base);

	/* Reset controller */
	ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
	if (ret) {
		pr_err("unable to find reset controller for ethernet PHYs\n");
		return ret;
	}

	/* The WAN port will be isolated from the LAN ports */
	priv->wan_port = dev_read_u32_default(dev, "mediatek,wan-port", -1);

	/*
	 * Put rx and tx rings into KSEG1 area (uncached), so descriptor
	 * updates are immediately visible to the DMA engine without any
	 * cache maintenance.
	 */
	priv->tx_ring = (struct fe_tx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->tx_ring) * NUM_TX_DESC));
	priv->rx_ring = (struct fe_rx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->rx_ring) * NUM_RX_DESC));

	for (i = 0; i < NUM_RX_DESC; i++)
		priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mt7628_mdio_read;
	bus->write = mt7628_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
	if (poll_link_phy >= 0) {
		if (poll_link_phy >= NUM_PHYS) {
			pr_err("invalid phy %d for poll-link-phy\n",
			       poll_link_phy);
			return -EINVAL;
		}

		priv->phy = phy_connect(bus, poll_link_phy, dev,
					PHY_INTERFACE_MODE_MII);
		if (!priv->phy) {
			pr_err("failed to probe phy %d\n", poll_link_phy);
			return -ENODEV;
		}

		priv->phy->advertising = priv->phy->supported;
		phy_config(priv->phy);
	}

	/* Switch configuration */
	rt305x_esw_init(priv);

	return 0;
}

static const struct eth_ops mt7628_eth_ops = {
	.start = mt7628_eth_start,
	.send = mt7628_eth_send,
	.recv = mt7628_eth_recv,
	.free_pkt = mt7628_eth_free_pkt,
	.stop = mt7628_eth_stop,
	.write_hwaddr = mt7628_eth_write_hwaddr,
};

static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};

U_BOOT_DRIVER(mt7628_eth) = {
	.name = "mt7628_eth",
	.id = UCLASS_ETH,
	.of_match = mt7628_eth_ids,
	.probe = mt7628_eth_probe,
	.ops = &mt7628_eth_ops,
	.priv_auto = sizeof(struct mt7628_eth_dev),
	.plat_auto = sizeof(struct eth_pdata),
};