// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 */

#include <common.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_ARCH_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ARCH_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
#define MVGBE_PGADR_REG 22

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
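/*
 * smi_wait_ready - Poll the SMI busy bit until the interface is idle.
 *
 * Returns 0 once the SMI unit is ready, or a negative error code if the
 * busy bit is still set after MVGBE_PHY_SMI_TIMEOUT_MS milliseconds.
 */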
static int smi_wait_ready(struct mvgbe_device *dmvgbe)
{
	int ret;

	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
				MVGBE_PHY_SMI_TIMEOUT_MS, false);
	if (ret) {
		printf("Error: SMI busy timeout\n");
		return ret;
	}

	return 0;
}

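/*
 * __mvgbe_mdio_read - Read a PHY register over the SMI (MDIO) interface.
 *
 * Passing MV_PHY_ADR_REQUEST as both phy_adr and reg_ofs returns the PHY
 * address currently programmed in the port's PHY address register.
 *
 * Returns the 16-bit register value, or -EFAULT on error.
 */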
static int __mvgbe_mdio_read(struct mvgbe_device *dmvgbe, int phy_adr,
			     int devad, int reg_ofs)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;
	u16 data = 0;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		/* return the currently programmed PHY address */
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns the 16-bit phy register value, or -EFAULT on error
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_read(dmvgbe, phy_adr, devad, reg_ofs);
}

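/*
 * __mvgbe_mdio_write - Write a PHY register over the SMI (MDIO) interface.
 *
 * Passing MV_PHY_ADR_REQUEST as both phy_adr and reg_ofs updates the port's
 * PHY address register instead of a PHY register.
 *
 * Returns 0 on success, or a negative error code on invalid parameters or
 * SMI timeout.
 */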
static int __mvgbe_mdio_write(struct mvgbe_device *dmvgbe, int phy_adr,
			      int devad, int reg_ofs, u16 data)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}

175/*
176 * smi_reg_write - miiphy_write callback function.
177 *
178 * Returns 0 if write succeed, -EFAULT on error
179 */
180static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
181 int reg_ofs, u16 data)
182{
Chris Packhamfb731072018-07-09 21:34:00 +1200183 struct mvgbe_device *dmvgbe = bus->priv;
Chris Packhame9bf75c2018-07-09 21:33:59 +1200184
185 return __mvgbe_mdio_write(dmvgbe, phy_adr, devad, reg_ofs, data);
186}
Stefan Biglercc796972012-03-26 00:02:13 +0000187#endif
Prafulla Wadaskar91315892009-06-14 22:33:46 +0530188
/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
		     (((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
		     (param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

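/*
 * set_dram_access - Map each DRAM bank into an Ethernet address window.
 *
 * One address decode window is programmed per DRAM bank reported in
 * gd->bd->bi_dram[], with full access rights; windows for empty or
 * unknown banks are disabled.
 */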
static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.
 * It adds or removes MAC addresses from the port unicast address table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 if the operation succeeded. 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port Unicast address.
 */
static void port_uc_addr_set(struct mvgbe_device *dmvgbe, u8 *p_addr)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Set up the Rx descriptor ring and buffers.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1)) {
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		} else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

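/*
 * __mvgbe_init - Bring up one GbE port.
 *
 * Sets up the Rx descriptor ring, clears and unmasks the port interrupts,
 * programs the address decode windows and MAC address, applies the port,
 * SDMA and MTU configuration, and finally enables the port and Rx DMA.
 */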
static int __mvgbe_init(struct mvgbe_device *dmvgbe, u8 *enetaddr,
			const char *name)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(dmvgbe, enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		     (QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		     | (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

	return 0;
}

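/*
 * __mvgbe_halt - Stop all port activity.
 *
 * Disables the address decode windows, stops the Tx and Rx queues, then
 * disables the port and masks its interrupts.
 */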
static void __mvgbe_halt(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoders */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
}

static int mvgbe_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	port_uc_addr_set(dev_get_priv(dev), pdata->enetaddr);

	return 0;
}

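/*
 * __mvgbe_send - Transmit one frame using a single Tx descriptor.
 *
 * Frames that are not 8-byte aligned are first copied into an aligned
 * bounce buffer. The function busy-waits until the DMA engine releases
 * the descriptor and returns 0 on success or -1 on transmit error.
 */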
static int __mvgbe_send(struct mvgbe_device *dmvgbe, void *dataptr,
			int datasize)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
			       datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as the zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if an error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

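/*
 * __mvgbe_recv - Receive one frame from the current Rx descriptor.
 *
 * Polls until the DMA engine hands back the descriptor, drops frames that
 * span multiple descriptors or carry an error summary, then returns the
 * descriptor to the hardware and advances to the next one in the ring.
 *
 * Returns the number of received bytes, 0 if the frame was dropped, or
 * -1 on timeout.
 */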
static int __mvgbe_recv(struct mvgbe_device *dmvgbe, uchar **packetp)
{
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;
	unsigned char *data;
	int rx_bytes = 0;

	*packetp = NULL;

	/* wait until an rx packet is available or timeout */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
		      __func__, (u32) p_rxdesc_curr->byte_cnt,
		      (u32) p_rxdesc_curr->buf_ptr,
		      (u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If a packet was received without the first/last bits set,
	 * or with the error summary bit set, the packet needs to be
	 * dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);
	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
		printf("Err..(%s) Dropping packet with errors\n",
			__func__);
	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		data = (p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET);
		rx_bytes = (int)(p_rxdesc_curr->byte_cnt -
				 RX_BUF_OFFSET);

		*packetp = data;
	}
	/*
	 * free this descriptor and point to the next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return rx_bytes;
}

#if defined(CONFIG_PHYLIB)
static struct phy_device *__mvgbe_phy_init(struct udevice *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
{
	struct phy_device *phydev;

	/* Set phy address of the port */
	miiphy_write(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
		     phyid);

	/* Make sure the selected PHY page is 0 before connecting */
	miiphy_write(dev->name, phyid, MVGBE_PGADR_REG, 0);

	phydev = phy_connect(bus, phyid, dev, phy_interface);
	if (!phydev) {
		printf("phy_connect failed\n");
		return NULL;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return phydev;
}
#endif /* CONFIG_PHYLIB */

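/*
 * mvgbe_alloc_buffers - Allocate the Rx/Tx descriptor and buffer memory.
 *
 * Returns 0 on success or -ENOMEM, freeing any partial allocations.
 */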
static int mvgbe_alloc_buffers(struct mvgbe_device *dmvgbe)
{
	dmvgbe->p_rxdesc = memalign(PKTALIGN,
				    MV_RXQ_DESC_ALIGNED_SIZE * RINGSZ + 1);
	if (!dmvgbe->p_rxdesc)
		goto error1;

	dmvgbe->p_rxbuf = memalign(PKTALIGN,
				   RINGSZ * PKTSIZE_ALIGN + 1);
	if (!dmvgbe->p_rxbuf)
		goto error2;

	dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
	if (!dmvgbe->p_aligned_txbuf)
		goto error3;

	dmvgbe->p_txdesc = memalign(PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
	if (!dmvgbe->p_txdesc)
		goto error4;

	return 0;

error4:
	free(dmvgbe->p_aligned_txbuf);
error3:
	free(dmvgbe->p_rxbuf);
error2:
	free(dmvgbe->p_rxdesc);
error1:
	return -ENOMEM;
}

static int mvgbe_port_is_fixed_link(struct mvgbe_device *dmvgbe)
{
	return dmvgbe->phyaddr > PHY_MAX_ADDR;
}

static int mvgbe_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	int ret;

	ret = __mvgbe_init(dmvgbe, pdata->enetaddr, dev->name);
	if (ret)
		return ret;

	if (!mvgbe_port_is_fixed_link(dmvgbe)) {
		dmvgbe->phydev = __mvgbe_phy_init(dev, dmvgbe->bus,
						  dmvgbe->phy_interface,
						  dmvgbe->phyaddr);
		if (!dmvgbe->phydev)
			return -ENODEV;
	}

	return 0;
}

static int mvgbe_send(struct udevice *dev, void *packet, int length)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_send(dmvgbe, packet, length);
}

static int mvgbe_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_recv(dmvgbe, packetp);
}

static void mvgbe_stop(struct udevice *dev)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	__mvgbe_halt(dmvgbe);
}

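/*
 * mvgbe_probe - Allocate the driver buffers and register the MDIO bus.
 */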
static int mvgbe_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	struct mii_dev *bus;
	int ret;

	ret = mvgbe_alloc_buffers(dmvgbe);
	if (ret)
		return ret;

	dmvgbe->regs = (void __iomem *)pdata->iobase;

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = dmvgbe;
	dmvgbe->bus = bus;

	ret = mdio_register(bus);
	if (ret < 0)
		return ret;

	return 0;
}

static const struct eth_ops mvgbe_ops = {
	.start = mvgbe_start,
	.send = mvgbe_send,
	.recv = mvgbe_recv,
	.stop = mvgbe_stop,
	.write_hwaddr = mvgbe_write_hwaddr,
};

static int mvgbe_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	int fl_node;
	int pnode;
	unsigned long addr;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;

	pnode = fdt_node_offset_by_compatible(blob, node,
					      "marvell,kirkwood-eth-port");

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		pdata->phy_interface = PHY_INTERFACE_MODE_GMII;

	dmvgbe->phy_interface = pdata->phy_interface;

	/* fetch 'fixed-link' property */
	fl_node = fdt_subnode_offset(blob, pnode, "fixed-link");
	if (fl_node != -FDT_ERR_NOTFOUND) {
		/* set phy_addr to an invalid value for fixed link */
		dmvgbe->phyaddr = PHY_MAX_ADDR + 1;
		dmvgbe->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		dmvgbe->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Now read phyaddr from DT */
		addr = fdtdec_lookup_phandle(blob, pnode, "phy-handle");
		if (addr > 0)
			dmvgbe->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	return 0;
}

static const struct udevice_id mvgbe_ids[] = {
	{ .compatible = "marvell,kirkwood-eth" },
	{ }
};

U_BOOT_DRIVER(mvgbe) = {
	.name = "mvgbe",
	.id = UCLASS_ETH,
	.of_match = mvgbe_ids,
	.of_to_plat = mvgbe_of_to_plat,
	.probe = mvgbe_probe,
	.ops = &mvgbe_ops,
	.priv_auto = sizeof(struct mvgbe_device),
	.plat_auto = sizeof(struct eth_pdata),
};