// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas Ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/global_data.h>
#include <asm/io.h>

#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>

#include "sh_eth.h"

#ifndef CFG_SH_ETHER_USE_PORT
# error "Please define CFG_SH_ETHER_USE_PORT"
#endif
#ifndef CFG_SH_ETHER_PHY_ADDR
# error "Please define CFG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len) \
	flush_dcache_range((unsigned long)addr, \
			   (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len) \
	{ \
		unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE; \
		unsigned long start, end; \
		\
		start = (unsigned long)addr; \
		end = start + len; \
		start &= ~(line_size - 1); \
		end = ((end + line_size - 1) & ~(line_size - 1)); \
		\
		invalidate_dcache_range(start, end); \
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

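/*
 * Transmit one frame. The caller's buffer must be 4-byte aligned and at
 * most 0xffff bytes long; it is handed to the E-DMAC through the current
 * tx descriptor, and the function polls TD_TACT (up to TIMEOUT_CNT
 * iterations of 100 us) until the transfer completes before advancing
 * the ring pointer.
 */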
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* The packet must be on a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

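/*
 * Check the current rx descriptor: returns -EAGAIN while the descriptor
 * is still owned by the E-DMAC, 0 if the frame completed with a receive
 * error, or the received frame length taken from the low 16 bits of rd1.
 */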
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EAGAIN;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return 0;

	return port_info->rx_desc_cur->rd1 & 0xffff;
}

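/*
 * Hand the just-processed rx descriptor back to the E-DMAC (preserving
 * the end-of-list marker), write it back to memory and advance the
 * software pointer to the next descriptor in the ring.
 */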
static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	invalidate_cache(ADDR_TO_P2(port_info->rx_desc_cur->rd2), MAX_BUF_SIZE);

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}

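/*
 * Software-reset the controller. The GETHER/RZ variants poll EDMR_SRST
 * until the hardware clears it (bounded by TIMEOUT_CNT); the other
 * variants assert the reset bit, wait 3 ms and deassert it.
 */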
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

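/*
 * Allocate and initialise the tx descriptor ring. Descriptors are
 * accessed through an uncached (P2) alias, the last entry is marked with
 * TD_TDLE, and the physical base address is programmed into TDLAR; the
 * ring is also advertised through TDFAR/TDFXR/TDFFR on GETHER/RZ parts.
 */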
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);
	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses.
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}

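/*
 * Allocate and initialise the rx descriptor ring plus one MAX_BUF_SIZE
 * receive buffer per descriptor. Descriptors are accessed uncached, each
 * rd2 holds the physical buffer address, the last entry is marked with
 * RD_RDLE and the ring base is written to RDLAR.
 */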
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	invalidate_cache(port_info->rx_buf_alloc, NUM_RX_DESC * MAX_BUF_SIZE);
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
				unsigned char *mac)
{
	u32 val;

	val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	sh_eth_write(port_info, val, MAHR);

	val = (mac[4] << 8) | mac[5];
	sh_eth_write(port_info, val, MALR);
}

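/*
 * Program the E-DMAC and E-MAC registers with the driver defaults:
 * descriptor settings in EDMR, FIFO sizes, interrupt masks cleared, the
 * station MAC address, and the RMII/MII selection on the SoCs that
 * need it.
 */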
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set MAC address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}

static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled when
	 * we have something to transmit
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}

static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret = 0;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}

static int sh_eth_start_common(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	int ret;

	ret = phy_startup(port_info->phydev);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	ret = sh_eth_phy_regs_config(eth);
	if (ret)
		return ret;

	sh_eth_start(eth);

	return 0;
}

struct sh_ether_priv {
	struct sh_eth_dev shdev;

	struct mii_dev *bus;
	phys_addr_t iobase;
	struct clk clk;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}

static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}

static int sh_ether_write_hwaddr(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_pdata *pdata = dev_get_plat(dev);

	sh_eth_write_hwaddr(port_info, pdata->enetaddr);

	return 0;
}

static int sh_eth_phy_config(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phydev;

	phydev = phy_connect(priv->bus, -1, dev, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

static void sh_ether_stop(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	phy_shutdown(port_info->phydev);
	sh_eth_stop(&priv->shdev);
}

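/*
 * Driver model probe: optionally acquire and enable the module clock,
 * register a bit-banged MDIO bus, fill in the port number and PHY address
 * from the CFG_SH_ETHER_* settings and the register base from
 * BASE_IO_ADDR, then run the common init and connect the PHY.
 */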
static int sh_ether_probe(struct udevice *udev)
{
	struct eth_pdata *pdata = dev_get_plat(udev);
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct mii_dev *mdiodev;
	int ret;

	priv->iobase = pdata->iobase;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_get_by_index(udev, 0, &priv->clk);
	if (ret < 0)
		return ret;
#endif
	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		return ret;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	priv->bus = miiphy_get_dev_by_name(udev->name);

	eth->port = CFG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_enable(&priv->clk);
	if (ret)
		goto err_mdio_register;
#endif

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		goto err_phy_config;

	ret = sh_eth_phy_config(udev);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_config;
	}

	return 0;

err_phy_config:
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
err_mdio_register:
	mdio_free(mdiodev);
	return ret;
}

static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops sh_ether_ops = {
	.start		= sh_ether_start,
	.send		= sh_ether_send,
	.recv		= sh_ether_recv,
	.free_pkt	= sh_ether_free_pkt,
	.stop		= sh_ether_stop,
	.write_hwaddr	= sh_ether_write_hwaddr,
};

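/*
 * Read the register base, PHY mode and optional "max-speed" property from
 * the device tree. An illustrative node (the address, size and phy-mode
 * below are assumptions for the example, not taken from this driver)
 * might look like:
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7791";
 *		reg = <0 0xee700000 0 0x400>;
 *		phy-mode = "rmii";
 *		max-speed = <100>;
 *	};
 */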
int sh_ether_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	const fdt32_t *cell;

	pdata->iobase = dev_read_addr(dev);

	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return 0;
}

static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }
};

U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.of_to_plat	= sh_ether_of_to_plat,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto	= sizeof(struct sh_ether_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};

/******* for bb_miiphy *******/
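/*
 * Bit-banged MDIO helpers for the generic bb_miiphy layer: MDIO and MDC
 * are driven manually through the PIR register (PIR_MMD selects output
 * mode, PIR_MDO/PIR_MDC drive the pins, PIR_MDI reads the data line back).
 */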
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	*v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);