// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <dm.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <console.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <linux/errno.h>

DECLARE_GLOBAL_DATA_PTR;

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by HW */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x00000400 /* 1 Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x00000002 /* Full duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL	0x08000000 /* SGMII enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL		0x00000800 /* PCS select */
#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_DBUS_WIDTH | \
					ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte (1 * max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH		BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH		(0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF | \
					ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL	0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B	BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
	u32 reserved9[20];
	u32 pcscntrl;
	u32 reserved12[36];
	u32 dcfg6; /* 0x294 - Design config reg6 */
	u32 reserved7[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
	u32 reserved10[17];
	u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
	u32 reserved11[2];
	u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Buffer address; RX BDs also carry the wrap/used flags here */
	u32 status;
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi;
	u32 reserved;
#endif
};

#define RX_BUF 32
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer BDs, so 1MB of BD space is enough.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	(RX_BUF * sizeof(struct emac_bd))

/* Setup the first free TX descriptor */
#define TX_FREE_DESC	2

/* init, rxbd_current and rx_first_buf must be 0 after initialization */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	int init;
	struct zynq_gem_regs *iobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	ofnode phy_of_node;
	struct mii_dev *bus;
	struct clk clk;
	u32 max_speed;
	bool int_pcs;
	bool dma_64bit;
};

static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->iobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	int ret;

	ret = phy_setup_op(priv, phy_addr, regnum,
			   ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

	if (!ret)
		debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
		      phy_addr, regnum, *val);

	return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
	      regnum, data);

	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

static int zynq_gem_setup_mac(struct udevice *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = pdata->enetaddr[0];
	macaddrlow |= pdata->enetaddr[1] << 8;
	macaddrlow |= pdata->enetaddr[2] << 16;
	macaddrlow |= pdata->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = pdata->enetaddr[4];
	macaddrhigh |= pdata->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

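	/*
	 * Program the station MAC into specific-address slot 0; per the GEM
	 * documentation the address match is re-armed once the TOP (high)
	 * half is written, so write BOT first, then TOP.
	 */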
	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only MDIO bus */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs->nwctrl);

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (!priv->phydev)
		return -ENODEV;

	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				   ADVERTISED_Asym_Pause;

	priv->phydev->advertising = priv->phydev->supported;
	priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}

static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

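	/*
	 * Design config register 6 reports whether this GEM instance was
	 * synthesized with 64-bit DMA addressing; it has to match the
	 * addressing mode U-Boot was built for.
	 */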
	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit DMA even though HW supports it\n",
		      __func__);
#endif

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					(lower_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
			priv->rx_bd[i].addr_hi =
					(upper_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#endif
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
		writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Setup for Network Control register, MDIO, Rx and Tx enable */
		setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Disable the second priority queue */
		dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
		dummy_tx_bd->addr_hi = 0;
#endif
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK |
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
		dummy_rx_bd->addr_hi = 0;
#endif
		dummy_rx_bd->status = 0;

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Set SGMII enable and PCS selection only if the internal PCS/PMA
	 * core is used and the interface is SGMII.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
#ifdef CONFIG_ARM64
		writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
		       &regs->pcscntrl);
#endif
	}

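	/*
	 * Program the MAC speed bits and pick the reference TX clock rate
	 * (2.5/25/125 MHz) that matches the speed the PHY negotiated.
	 */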
	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

#if !defined(CONFIG_ARCH_VERSAL)
	ret = clk_set_rate(&priv->clk, clk_rate);
	if (IS_ERR_VALUE(ret) && ret != (unsigned long)-ENOSYS) {
		dev_err(dev, "failed to set tx clock rate\n");
		return ret;
	}

	ret = clk_enable(&priv->clk);
	if (ret && ret != -ENOSYS) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}
#else
	debug("requested clk_rate %ld\n", clk_rate);
#endif

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK |
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* Setup BD */
	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

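	/* Flush the frame out of the data cache so the GEM DMA sees it */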
	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	if (!(current_bd->status &
	      (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

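	/*
	 * Rebuild the buffer address from the BD: the low word also holds
	 * the wrap/used flags, the optional high word the upper 32 bits.
	 */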
#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		      | ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	return frame_len;
}

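/*
 * Return the received buffer(s) to the controller: clear the used bit in
 * every BD belonging to the frame and advance the software ring index.
 */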
static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000; /* FIXME */
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}

static void zynq_gem_halt(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
			ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
	return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	if (!pdata)
		return -ENOSYS;

	return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
				int devad, int reg)
{
	struct zynq_gem_priv *priv = bus->priv;
	int ret;
	u16 val = 0;

	ret = phyread(priv, addr, reg, &val);
	debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
	return val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
	return phywrite(priv, addr, reg, value);
}

static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

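	/* Zero the RX buffers and clean them from the cache so no dirty lines overlap the DMA area */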
	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
	ulong addr = (ulong)priv->rxbuffers;
	flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	/* Align bd_space to MMU_SECTION_SHIFT */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space)
		return -ENOMEM;

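	/* Map the BD area uncached so the CPU and the GEM DMA always agree on its contents */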
	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to get clock\n");
		return -EINVAL;
	}

	priv->bus = mdio_alloc();
	priv->bus->read = zynq_gem_miiphy_read;
	priv->bus->write = zynq_gem_miiphy_write;
	priv->bus->priv = priv;

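	/* Register the MDIO bus under the device sequence number so multiple GEM instances get unique bus names */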
	ret = mdio_register_seq(priv->bus, dev->seq);
	if (ret)
		return ret;

	return zynq_phy_init(dev);
}

static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops zynq_gem_ops = {
	.start			= zynq_gem_init,
	.send			= zynq_gem_send,
	.recv			= zynq_gem_recv,
	.free_pkt		= zynq_gem_free_pkt,
	.stop			= zynq_gem_halt,
	.write_hwaddr		= zynq_gem_setup_mac,
	.read_rom_hwaddr	= zynq_gem_read_rom_mac,
};

static int zynq_gem_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)dev_read_addr(dev);
	priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
	/* Hardcode for now */
	priv->phyaddr = -1;

	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					&phandle_args)) {
		debug("phy-handle does exist %s\n", dev->name);
		priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
		priv->phy_of_node = phandle_args.node;
		priv->max_speed = ofnode_read_u32_default(phandle_args.node,
							  "max-speed",
							  SPEED_1000);
	}

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

	printf("ZYNQ GEM: %lx, phyaddr %x, interface %s\n", (ulong)priv->iobase,
	       priv->phyaddr, phy_string_for_interface(priv->interface));

	return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};

U_BOOT_DRIVER(zynq_gem) = {
	.name			= "zynq_gem",
	.id			= UCLASS_ETH,
	.of_match		= zynq_gem_ids,
	.ofdata_to_platdata	= zynq_gem_ofdata_to_platdata,
	.probe			= zynq_gem_probe,
	.remove			= zynq_gem_remove,
	.ops			= &zynq_gem_ops,
	.priv_auto_alloc_size	= sizeof(struct zynq_gem_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};