// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <generic-phy.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <console.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>
#include <reset.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by HW */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x00000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x00000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_NO_BRDC		BIT(5) /* No broadcast */
#define ZYNQ_GEM_NWCFG_FSREM		0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL	0x08000000 /* SGMII Enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL		0x00000800 /* PCS select */
#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_DBUS_WIDTH | \
					ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_NO_BRDC | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte (1 * max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH		BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH		(0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF | \
					ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL	0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B	BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10		2500000UL
#define ZYNQ_GEM_FREQUENCY_100		25000000UL
#define ZYNQ_GEM_FREQUENCY_1000		125000000UL

#define RXCLK_EN		BIT(0)

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
	u32 reserved9[20];
	u32 pcscntrl;
	u32 rserved12[36];
	u32 dcfg6; /* 0x294 - Design config reg6 */
	u32 reserved7[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
	u32 reserved10[17];
	u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
	u32 reserved11[2];
	u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi;
	u32 reserved;
#endif
};

/* Reduce the number of RX buffers if you have a limited amount of memory */
#define RX_BUF 32
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer BDs, so 1MB of BD space is enough.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	(RX_BUF * sizeof(struct emac_bd))

/* Setup the first free TX descriptor */
#define TX_FREE_DESC	2

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	int init;
	struct zynq_gem_regs *iobase;
	struct zynq_gem_regs *mdiobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	ofnode phy_of_node;
	struct mii_dev *bus;
	struct clk rx_clk;
	struct clk tx_clk;
	u32 max_speed;
	bool int_pcs;
	bool dma_64bit;
	u32 clk_en_info;
	struct reset_ctl_bulk resets;
};

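/*
 * Issue a single PHY maintenance (MDIO) operation: wait for the management
 * interface to become idle, program the PHYMNTNC register and wait for
 * completion. For read operations the result is returned through *data.
 */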
static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->mdiobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	int ret;

	ret = phy_setup_op(priv, phy_addr, regnum,
			   ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

	if (!ret)
		debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
		      phy_addr, regnum, *val);

	return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
	      regnum, data);

	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

static int zynq_gem_setup_mac(struct udevice *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = pdata->enetaddr[0];
	macaddrlow |= pdata->enetaddr[1] << 8;
	macaddrlow |= pdata->enetaddr[2] << 16;
	macaddrlow |= pdata->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = pdata->enetaddr[4];
	macaddrhigh |= pdata->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

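/*
 * Enable the MDIO interface, connect to the PHY referenced by the device
 * tree, trim the supported/advertised features and run phy_config().
 */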
static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only MDIO bus */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs_mdio->nwctrl);

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (!priv->phydev)
		return -ENODEV;

	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				   ADVERTISED_Asym_Pause;

	priv->phydev->advertising = priv->phydev->supported;
	priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}

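/*
 * Bring the controller up: perform the one-time RX BD ring and register
 * setup, start the PHY, program the speed/SGMII configuration, set up the
 * TX/RX clocks and finally enable the transmitter and receiver.
 */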
static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit DMA even though HW supports it\n",
		      __func__);
#endif

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					(lower_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
			priv->rx_bd[i].addr_hi =
					(upper_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#endif
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
		writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Enable MDIO via the Network Control register */
		setbits_le32(&regs_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Disable the second priority queue */
		dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
		dummy_tx_bd->addr_hi = 0;
#endif
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK |
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
		dummy_rx_bd->addr_hi = 0;
#endif
		dummy_rx_bd->status = 0;

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Set the SGMII-enable and PCS-selection bits only if the internal
	 * PCS/PMA core is used and the interface is SGMII.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
	}

	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

#ifdef CONFIG_ARM64
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		/*
		 * Disable AN for fixed link configuration, enable otherwise.
		 * Must be written after PCS_SEL is set in nwconfig,
		 * otherwise writes will not take effect.
		 */
		if (priv->phydev->phy_id != PHY_FIXED_ID)
			writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
		else
			writel(readl(&regs->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
	}
#endif

	ret = clk_set_rate(&priv->tx_clk, clk_rate);
	if (IS_ERR_VALUE(ret)) {
		dev_err(dev, "failed to set tx clock rate\n");
		return ret;
	}

	ret = clk_enable(&priv->tx_clk);
	if (ret) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}

	if (priv->clk_en_info & RXCLK_EN) {
		ret = clk_enable(&priv->rx_clk);
		if (ret) {
			dev_err(dev, "failed to enable rx clock\n");
			return ret;
		}
	}

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
		     ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}

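/* Queue a single frame on the first TX BD, flush its cache lines, start the
 * transmitter and poll the TX status register until the frame is sent.
 */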
static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK |
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* setup BD */
	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	if (!(current_bd->status &
	      (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		| ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	invalidate_dcache_range(addr,
				addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	return frame_len;
}

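/* Hand the RX BD(s) of a consumed packet back to the hardware and advance
 * to the next descriptor.
 */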
static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;
	dma_addr_t addr;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000; /* FIXME */
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	/* Flush the cache for the packet as well */
#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		| ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
						ARCH_DMA_MINALIGN));
	barrier();

	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}

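/* Stop the controller by disabling its receiver and transmitter */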
static void zynq_gem_halt(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
			ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
	return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	if (!pdata)
		return -ENOSYS;

	return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
				int devad, int reg)
{
	struct zynq_gem_priv *priv = bus->priv;
	int ret;
	u16 val = 0;

	ret = phyread(priv, addr, reg, &val);
	debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
	return val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
	return phywrite(priv, addr, reg, value);
}

static int zynq_gem_reset_init(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;

	ret = reset_get_bulk(dev, &priv->resets);
	if (ret == -ENOTSUPP || ret == -ENOENT)
		return 0;
	else if (ret)
		return ret;

	ret = reset_deassert_bulk(&priv->resets);
	if (ret) {
		reset_release_bulk(&priv->resets);
		return ret;
	}

	return 0;
}

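/*
 * Probe: initialize the optional SGMII PHY, release resets, allocate RX
 * buffers and BD space, look up the clocks, register the MDIO bus and
 * configure the PHY.
 */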
static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;
	struct phy phy;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII) {
		ret = generic_phy_get_by_index(dev, 0, &phy);
		if (!ret) {
			ret = generic_phy_init(&phy);
			if (ret)
				return ret;
		} else if (ret != -ENOENT) {
			debug("could not get phy (err %d)\n", ret);
			return ret;
		}
	}

	ret = zynq_gem_reset_init(dev);
	if (ret)
		return ret;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
	ulong addr = (ulong)priv->rxbuffers;
	flush_dcache_range(addr,
			   addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	/* Align bd_space to MMU_SECTION_SHIFT */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space) {
		ret = -ENOMEM;
		goto err1;
	}

	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);

	/* Initialize the BD spaces for TX and RX BDs */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->tx_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get tx_clock\n");
		goto err2;
	}

	if (priv->clk_en_info & RXCLK_EN) {
		ret = clk_get_by_name(dev, "rx_clk", &priv->rx_clk);
		if (ret < 0) {
			dev_err(dev, "failed to get rx_clock\n");
			goto err2;
		}
	}

	priv->bus = mdio_alloc();
	priv->bus->read = zynq_gem_miiphy_read;
	priv->bus->write = zynq_gem_miiphy_write;
	priv->bus->priv = priv;

	ret = mdio_register_seq(priv->bus, dev_seq(dev));
	if (ret)
		goto err2;

	ret = zynq_phy_init(dev);
	if (ret)
		goto err3;

	if (priv->interface == PHY_INTERFACE_MODE_SGMII && phy.dev) {
		ret = generic_phy_power_on(&phy);
		if (ret)
			return ret;
	}

	return ret;

err3:
	mdio_unregister(priv->bus);
err2:
	free(priv->tx_bd);
err1:
	free(priv->rxbuffers);
	return ret;
}

static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops zynq_gem_ops = {
	.start = zynq_gem_init,
	.send = zynq_gem_send,
	.recv = zynq_gem_recv,
	.free_pkt = zynq_gem_free_pkt,
	.stop = zynq_gem_halt,
	.write_hwaddr = zynq_gem_setup_mac,
	.read_rom_hwaddr = zynq_gem_read_rom_mac,
};

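/* Parse the register base, phy-handle, phy-mode and related DT properties */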
static int zynq_gem_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)dev_read_addr(dev);
	priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
	priv->mdiobase = priv->iobase;
	/* Hardcode for now */
	priv->phyaddr = -1;

	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					&phandle_args)) {
		fdt_addr_t addr;
		ofnode parent;

		debug("phy-handle does exist %s\n", dev->name);
		priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
		priv->phy_of_node = phandle_args.node;
		priv->max_speed = ofnode_read_u32_default(phandle_args.node,
							  "max-speed",
							  SPEED_1000);

		parent = ofnode_get_parent(phandle_args.node);
		if (ofnode_name_eq(parent, "mdio"))
			parent = ofnode_get_parent(parent);

		addr = ofnode_get_addr(parent);
		if (addr != FDT_ADDR_T_NONE) {
			debug("Separate MDIO bus found for %s\n", dev->name);
			priv->mdiobase = (struct zynq_gem_regs *)addr;
		}
	}

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

	printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
	       (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phyaddr,
	       phy_string_for_interface(priv->interface));

	priv->clk_en_info = dev_get_driver_data(dev);

	return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "cdns,versal-gem", .data = RXCLK_EN },
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};

U_BOOT_DRIVER(zynq_gem) = {
	.name = "zynq_gem",
	.id = UCLASS_ETH,
	.of_match = zynq_gem_ids,
	.of_to_plat = zynq_gem_of_to_plat,
	.probe = zynq_gem_probe,
	.remove = zynq_gem_remove,
	.ops = &zynq_gem_ops,
	.priv_auto = sizeof(struct zynq_gem_priv),
	.plat_auto = sizeof(struct eth_pdata),
};