// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/global_data.h>
#include <asm/io.h>

#ifdef CONFIG_DM_ETH
#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>
#endif

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((unsigned long)addr, \
		(unsigned long)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)					\
	{								\
		unsigned long line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;				\
									\
		start = (unsigned long)addr;				\
		end = start + len;					\
		start &= ~(line_size - 1);				\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
									\
		invalidate_dcache_range(start, end);			\
	}
#else
#define invalidate_cache(...)
#endif
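
/*
 * Note on cache maintenance: descriptors and packet buffers are shared
 * between the CPU and the E-DMAC, so the driver writes buffers back
 * (flush_cache_wback) before handing them to the controller and
 * invalidates descriptors (invalidate_cache) before re-reading their
 * status after DMA. Both helpers extend the range to multiples of
 * CONFIG_SH_ETHER_ALIGNE_SIZE so that partial cache lines are covered.
 */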

#define TIMEOUT_CNT 1000

static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* The packet must be on a 4-byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

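/*
 * Check the current rx descriptor: returns the length of a received
 * frame (the low 16 bits of rd1) when one is ready, or -EINVAL if the
 * descriptor is still owned by the controller (RD_RACT) or the receive
 * error bit (RD_RFE) is set. sh_eth_recv_finish() must be called
 * afterwards to re-arm the descriptor and advance the ring.
 */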
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EINVAL;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return -EINVAL;

	return port_info->rx_desc_cur->rd1 & 0xffff;
}

static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}

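/*
 * Soft-reset the controller. On GETHER/RZ parts the E-DMAC is enabled
 * via EDSR first and the self-clearing EDMR_SRST bit is polled until the
 * reset completes; on the other parts EDMR_SRST is set, held for 3 ms
 * and cleared again.
 */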
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}

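/*
 * Set up the receive ring: NUM_RX_DESC descriptors, each initially owned
 * by the controller (RD_RACT) and pointing at its own MAX_BUF_SIZE
 * buffer. The last descriptor carries RD_RDLE so the controller wraps
 * back to the start of the list.
 */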
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
				unsigned char *mac)
{
	u32 val;

	val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	sh_eth_write(port_info, val, MAHR);

	val = (mac[4] << 8) | mac[5];
	sh_eth_write(port_info, val, MALR);
}

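/*
 * Bring the E-DMAC/E-MAC registers to a known state: descriptor format
 * and endianness in EDMR, FIFO sizes, the station address (the first
 * four octets go into MAHR, the last two into MALR) and the receive
 * frame length limit. EESIPR/ECSIPR are cleared, i.e. interrupts stay
 * masked, since U-Boot only ever polls the controller.
 */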
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

| 406 | /* Set Mac address */ |
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}

static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit.
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}

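/*
 * Common initialisation shared by the legacy and driver-model paths:
 * reset the controller, allocate and initialise the tx/rx descriptor
 * rings and program the MAC registers with the given station address.
 * Link bring-up (phy_startup() plus the speed/duplex dependent ECMR
 * setup) happens separately in sh_eth_start_common().
 */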
static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret = 0;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}

static int sh_eth_start_common(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	int ret;

	ret = phy_startup(port_info->phydev);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	ret = sh_eth_phy_regs_config(eth);
	if (ret)
		return ret;

	sh_eth_start(eth);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
{
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		net_process_received_packet(packet, len);
		sh_eth_recv_finish(eth);
	} else {
		len = 0;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}

static int sh_eth_recv_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_recv_common(eth);
}

static int sh_eth_init_legacy(struct eth_device *dev, struct bd_info *bd)
{
	struct sh_eth_dev *eth = dev->priv;
	int ret;

	ret = sh_eth_init_common(eth, dev->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_phy_config_legacy(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_start;
	}

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

void sh_eth_halt_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

int sh_eth_initialize(struct bd_info *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;
	struct mii_dev *mdiodev;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init_legacy;
	dev->halt = sh_eth_halt_legacy;
	dev->send = sh_eth_send_legacy;
	dev->recv = sh_eth_recv_legacy;
	eth->port_info[eth->port].dev = dev;

	strcpy(dev->name, SHETHER_NAME);

	/* Register the device with the Ethernet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strlcpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;

	ret = mdio_register(mdiodev);
	if (ret < 0)
		return ret;

	if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

#else /* CONFIG_DM_ETH */

struct sh_ether_priv {
	struct sh_eth_dev shdev;

	struct mii_dev *bus;
	phys_addr_t iobase;
	struct clk clk;
	struct gpio_desc reset_gpio;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	} else {
		len = 0;

		/* Restart the receiver if disabled */
		if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
			sh_eth_write(port_info, EDRRR_R, EDRRR);

		return -EAGAIN;
	}
}

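/*
 * With DM_ETH the receive path is split in two: sh_ether_recv() above
 * only hands back a pointer into the current rx buffer (or -EAGAIN when
 * nothing is pending), and the network core later returns the buffer
 * through sh_ether_free_pkt(), which re-arms the descriptor and restarts
 * the receiver if it had stopped.
 */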
| 728 | static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length) |
| 729 | { |
| 730 | struct sh_ether_priv *priv = dev_get_priv(dev); |
| 731 | struct sh_eth_dev *eth = &priv->shdev; |
| 732 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
| 733 | |
| 734 | sh_eth_recv_finish(eth); |
| 735 | |
| 736 | /* Restart the receiver if disabled */ |
| 737 | if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R)) |
| 738 | sh_eth_write(port_info, EDRRR_R, EDRRR); |
| 739 | |
| 740 | return 0; |
| 741 | } |
| 742 | |
| 743 | static int sh_ether_write_hwaddr(struct udevice *dev) |
| 744 | { |
| 745 | struct sh_ether_priv *priv = dev_get_priv(dev); |
| 746 | struct sh_eth_dev *eth = &priv->shdev; |
| 747 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
Simon Glass | c69cda2 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 748 | struct eth_pdata *pdata = dev_get_plat(dev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 749 | |
| 750 | sh_eth_write_hwaddr(port_info, pdata->enetaddr); |
| 751 | |
| 752 | return 0; |
| 753 | } |
| 754 | |
| 755 | static int sh_eth_phy_config(struct udevice *dev) |
| 756 | { |
| 757 | struct sh_ether_priv *priv = dev_get_priv(dev); |
Simon Glass | c69cda2 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 758 | struct eth_pdata *pdata = dev_get_plat(dev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 759 | struct sh_eth_dev *eth = &priv->shdev; |
Marek Vasut | 3c5a7b7 | 2018-02-17 00:46:26 +0100 | [diff] [blame] | 760 | int ret = 0; |
| 761 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 762 | struct phy_device *phydev; |
| 763 | int mask = 0xffffffff; |
| 764 | |
| 765 | phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface); |
| 766 | if (!phydev) |
| 767 | return -ENODEV; |
| 768 | |
| 769 | phy_connect_dev(phydev, dev); |
| 770 | |
| 771 | port_info->phydev = phydev; |
| 772 | phy_config(phydev); |
| 773 | |
| 774 | return ret; |
| 775 | } |
| 776 | |
| 777 | static int sh_ether_start(struct udevice *dev) |
| 778 | { |
| 779 | struct sh_ether_priv *priv = dev_get_priv(dev); |
Simon Glass | c69cda2 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 780 | struct eth_pdata *pdata = dev_get_plat(dev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 781 | struct sh_eth_dev *eth = &priv->shdev; |
| 782 | int ret; |
| 783 | |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 784 | ret = sh_eth_init_common(eth, pdata->enetaddr); |
| 785 | if (ret) |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 786 | return ret; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 787 | |
| 788 | ret = sh_eth_start_common(eth); |
| 789 | if (ret) |
| 790 | goto err_start; |
| 791 | |
| 792 | return 0; |
| 793 | |
| 794 | err_start: |
| 795 | sh_eth_tx_desc_free(eth); |
| 796 | sh_eth_rx_desc_free(eth); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 797 | return ret; |
| 798 | } |
| 799 | |
| 800 | static void sh_ether_stop(struct udevice *dev) |
| 801 | { |
| 802 | struct sh_ether_priv *priv = dev_get_priv(dev); |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 803 | struct sh_eth_dev *eth = &priv->shdev; |
| 804 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 805 | |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 806 | phy_shutdown(port_info->phydev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 807 | sh_eth_stop(&priv->shdev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 808 | } |
| 809 | |
| 810 | static int sh_ether_probe(struct udevice *udev) |
| 811 | { |
Simon Glass | c69cda2 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 812 | struct eth_pdata *pdata = dev_get_plat(udev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 813 | struct sh_ether_priv *priv = dev_get_priv(udev); |
| 814 | struct sh_eth_dev *eth = &priv->shdev; |
Marek Vasut | 159b329 | 2018-06-18 04:03:01 +0200 | [diff] [blame] | 815 | struct ofnode_phandle_args phandle_args; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 816 | struct mii_dev *mdiodev; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 817 | int ret; |
| 818 | |
Marek Vasut | 5abcbd7 | 2018-02-17 00:57:49 +0100 | [diff] [blame] | 819 | priv->iobase = pdata->iobase; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 820 | |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 821 | #if CONFIG_IS_ENABLED(CLK) |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 822 | ret = clk_get_by_index(udev, 0, &priv->clk); |
| 823 | if (ret < 0) |
Marek Vasut | 5abcbd7 | 2018-02-17 00:57:49 +0100 | [diff] [blame] | 824 | return ret; |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 825 | #endif |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 826 | |
Marek Vasut | 159b329 | 2018-06-18 04:03:01 +0200 | [diff] [blame] | 827 | ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args); |
| 828 | if (!ret) { |
| 829 | gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0, |
| 830 | &priv->reset_gpio, GPIOD_IS_OUT); |
| 831 | } |
| 832 | |
| 833 | if (!dm_gpio_is_valid(&priv->reset_gpio)) { |
| 834 | gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio, |
| 835 | GPIOD_IS_OUT); |
| 836 | } |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 837 | |
| 838 | mdiodev = mdio_alloc(); |
| 839 | if (!mdiodev) { |
| 840 | ret = -ENOMEM; |
Marek Vasut | 5abcbd7 | 2018-02-17 00:57:49 +0100 | [diff] [blame] | 841 | return ret; |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 842 | } |
| 843 | |
| 844 | mdiodev->read = bb_miiphy_read; |
| 845 | mdiodev->write = bb_miiphy_write; |
| 846 | bb_miiphy_buses[0].priv = eth; |
| 847 | snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name); |
| 848 | |
| 849 | ret = mdio_register(mdiodev); |
| 850 | if (ret < 0) |
| 851 | goto err_mdio_register; |
| 852 | |
| 853 | priv->bus = miiphy_get_dev_by_name(udev->name); |
| 854 | |
| 855 | eth->port = CONFIG_SH_ETHER_USE_PORT; |
| 856 | eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR; |
| 857 | eth->port_info[eth->port].iobase = |
Marek Vasut | 7234a28 | 2019-07-31 14:48:17 +0200 | [diff] [blame] | 858 | (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 859 | |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 860 | #if CONFIG_IS_ENABLED(CLK) |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 861 | ret = clk_enable(&priv->clk); |
| 862 | if (ret) |
| 863 | goto err_mdio_register; |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 864 | #endif |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 865 | |
Marek Vasut | b13da11 | 2020-04-04 15:01:22 +0200 | [diff] [blame] | 866 | ret = sh_eth_init_common(eth, pdata->enetaddr); |
| 867 | if (ret) |
| 868 | goto err_phy_config; |
| 869 | |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 870 | ret = sh_eth_phy_config(udev); |
| 871 | if (ret) { |
| 872 | printf(SHETHER_NAME ": phy config timeout\n"); |
| 873 | goto err_phy_config; |
| 874 | } |
| 875 | |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 876 | return 0; |
| 877 | |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 878 | err_phy_config: |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 879 | #if CONFIG_IS_ENABLED(CLK) |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 880 | clk_disable(&priv->clk); |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 881 | #endif |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 882 | err_mdio_register: |
| 883 | mdio_free(mdiodev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 884 | return ret; |
| 885 | } |
| 886 | |
| 887 | static int sh_ether_remove(struct udevice *udev) |
| 888 | { |
| 889 | struct sh_ether_priv *priv = dev_get_priv(udev); |
| 890 | struct sh_eth_dev *eth = &priv->shdev; |
| 891 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
| 892 | |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 893 | #if CONFIG_IS_ENABLED(CLK) |
Marek Vasut | 4a45e93 | 2019-03-30 07:22:09 +0100 | [diff] [blame] | 894 | clk_disable(&priv->clk); |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 895 | #endif |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 896 | free(port_info->phydev); |
| 897 | mdio_unregister(priv->bus); |
| 898 | mdio_free(priv->bus); |
| 899 | |
| 900 | if (dm_gpio_is_valid(&priv->reset_gpio)) |
| 901 | dm_gpio_free(udev, &priv->reset_gpio); |
| 902 | |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 903 | return 0; |
| 904 | } |
| 905 | |
| 906 | static const struct eth_ops sh_ether_ops = { |
| 907 | .start = sh_ether_start, |
| 908 | .send = sh_ether_send, |
| 909 | .recv = sh_ether_recv, |
| 910 | .free_pkt = sh_ether_free_pkt, |
| 911 | .stop = sh_ether_stop, |
| 912 | .write_hwaddr = sh_ether_write_hwaddr, |
| 913 | }; |
| 914 | |
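/*
 * Parse the controller's device tree node: register base, "phy-mode" and
 * the optional "max-speed" property (defaults to 1000 Mbit/s).
 *
 * A minimal, illustrative node (addresses and property values below are
 * placeholders, not taken from any particular board):
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7791";
 *		reg = <0xee700000 0x400>;
 *		phy-mode = "rmii";
 *		max-speed = <100>;
 *	};
 */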
Simon Glass | d1998a9 | 2020-12-03 16:55:21 -0700 | [diff] [blame] | 915 | int sh_ether_of_to_plat(struct udevice *dev) |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 916 | { |
Simon Glass | c69cda2 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 917 | struct eth_pdata *pdata = dev_get_plat(dev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 918 | const char *phy_mode; |
| 919 | const fdt32_t *cell; |
| 920 | int ret = 0; |
| 921 | |
Masahiro Yamada | 2548493 | 2020-07-17 14:36:48 +0900 | [diff] [blame] | 922 | pdata->iobase = dev_read_addr(dev); |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 923 | pdata->phy_interface = -1; |
| 924 | phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", |
| 925 | NULL); |
| 926 | if (phy_mode) |
| 927 | pdata->phy_interface = phy_get_interface_by_name(phy_mode); |
| 928 | if (pdata->phy_interface == -1) { |
| 929 | debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode); |
| 930 | return -EINVAL; |
| 931 | } |
| 932 | |
| 933 | pdata->max_speed = 1000; |
| 934 | cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL); |
| 935 | if (cell) |
| 936 | pdata->max_speed = fdt32_to_cpu(*cell); |
| 937 | |
| 938 | snprintf(bb_miiphy_buses[0].name, sizeof(bb_miiphy_buses[0].name), "%s", dev->name);
| 939 | |
| 940 | return ret; |
| 941 | } |
| 942 | |
| 943 | static const struct udevice_id sh_ether_ids[] = { |
Marek Vasut | 24b3247 | 2019-05-02 00:03:26 +0200 | [diff] [blame] | 944 | { .compatible = "renesas,ether-r7s72100" }, |
Marek Vasut | d526801 | 2018-04-12 15:23:46 +0200 | [diff] [blame] | 945 | { .compatible = "renesas,ether-r8a7790" }, |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 946 | { .compatible = "renesas,ether-r8a7791" }, |
Marek Vasut | d526801 | 2018-04-12 15:23:46 +0200 | [diff] [blame] | 947 | { .compatible = "renesas,ether-r8a7793" }, |
| 948 | { .compatible = "renesas,ether-r8a7794" }, |
Marek Vasut | 46c3316 | 2019-07-31 12:58:06 +0200 | [diff] [blame] | 949 | { .compatible = "renesas,gether-r8a77980" }, |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 950 | { } |
| 951 | }; |
| 952 | |
| 953 | U_BOOT_DRIVER(eth_sh_ether) = { |
| 954 | .name = "sh_ether", |
| 955 | .id = UCLASS_ETH, |
| 956 | .of_match = sh_ether_ids, |
Simon Glass | d1998a9 | 2020-12-03 16:55:21 -0700 | [diff] [blame] | 957 | .of_to_plat = sh_ether_of_to_plat, |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 958 | .probe = sh_ether_probe, |
| 959 | .remove = sh_ether_remove, |
| 960 | .ops = &sh_ether_ops, |
Simon Glass | 41575d8 | 2020-12-03 16:55:17 -0700 | [diff] [blame] | 961 | .priv_auto = sizeof(struct sh_ether_priv), |
Simon Glass | caa4daa | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 962 | .plat_auto = sizeof(struct eth_pdata), |
Marek Vasut | 3192026 | 2018-01-19 18:57:17 +0100 | [diff] [blame] | 963 | .flags = DM_FLAG_ALLOC_PRIV_DMA, |
| 964 | }; |
| 965 | #endif |
| 966 | |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 967 | /******* for bb_miiphy *******/ |
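/*
 * Bit-bang MDIO accessors used by the bb_miiphy framework. All of them
 * manipulate bits in the PIR register: PIR_MMD selects the MDIO pin
 * direction, PIR_MDO drives the output level, PIR_MDI reflects the input
 * level and PIR_MDC is the management clock.
 */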
| 968 | static int sh_eth_bb_init(struct bb_miiphy_bus *bus) |
| 969 | { |
| 970 | return 0; |
| 971 | } |
| 972 | |
| 973 | static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus) |
| 974 | { |
| 975 | struct sh_eth_dev *eth = bus->priv; |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 976 | struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 977 | |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 978 | sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR); |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 979 | |
| 980 | return 0; |
| 981 | } |
| 982 | |
| 983 | static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus) |
| 984 | { |
| 985 | struct sh_eth_dev *eth = bus->priv; |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 986 | struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 987 | |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 988 | sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR); |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 989 | |
| 990 | return 0; |
| 991 | } |
| 992 | |
| 993 | static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v) |
| 994 | { |
| 995 | struct sh_eth_dev *eth = bus->priv; |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 996 | struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 997 | |
| 998 | if (v) |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 999 | sh_eth_write(port_info, |
| 1000 | sh_eth_read(port_info, PIR) | PIR_MDO, PIR); |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1001 | else |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 1002 | sh_eth_write(port_info, |
| 1003 | sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR); |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1004 | |
| 1005 | return 0; |
| 1006 | } |
| 1007 | |
| 1008 | static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v) |
| 1009 | { |
| 1010 | struct sh_eth_dev *eth = bus->priv; |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 1011 | struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1012 | |
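	/* MDI (input data) is bit 3 of PIR, hence the shift */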
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 1013 | *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3; |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1014 | |
| 1015 | return 0; |
| 1016 | } |
| 1017 | |
| 1018 | static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v) |
| 1019 | { |
| 1020 | struct sh_eth_dev *eth = bus->priv; |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 1021 | struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1022 | |
| 1023 | if (v) |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 1024 | sh_eth_write(port_info, |
| 1025 | sh_eth_read(port_info, PIR) | PIR_MDC, PIR); |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1026 | else |
Nobuhiro Iwamatsu | fbfb511 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 1027 | sh_eth_write(port_info, |
| 1028 | sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR); |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1029 | |
| 1030 | return 0; |
| 1031 | } |
| 1032 | |
| 1033 | static int sh_eth_bb_delay(struct bb_miiphy_bus *bus) |
| 1034 | { |
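	/* Settling delay between MDC/MDIO transitions */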
| 1035 | udelay(10); |
| 1036 | |
| 1037 | return 0; |
| 1038 | } |
| 1039 | |
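/*
 * Single bus descriptor for the bit-bang MII layer. In the DM_ETH case,
 * .priv and .name are overwritten at probe/of_to_plat time with the
 * controller instance and the device name.
 */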
| 1040 | struct bb_miiphy_bus bb_miiphy_buses[] = { |
| 1041 | { |
| 1042 | .name = "sh_eth", |
| 1043 | .init = sh_eth_bb_init, |
| 1044 | .mdio_active = sh_eth_bb_mdio_active, |
| 1045 | .mdio_tristate = sh_eth_bb_mdio_tristate, |
| 1046 | .set_mdio = sh_eth_bb_set_mdio, |
| 1047 | .get_mdio = sh_eth_bb_get_mdio, |
| 1048 | .set_mdc = sh_eth_bb_set_mdc, |
| 1049 | .delay = sh_eth_bb_delay, |
| 1050 | } |
| 1051 | }; |
Nobuhiro Iwamatsu | dc14867 | 2017-12-01 08:08:00 +0900 | [diff] [blame] | 1052 | |
Yoshihiro Shimoda | bd1024b | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 1053 | int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses); |