Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roese19fc2ea2014-10-22 12:13:14 +02002/*
3 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
4 *
5 * U-Boot version:
Stefan Roesee3b9c982015-11-19 07:46:15 +01006 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
Stefan Roese19fc2ea2014-10-22 12:13:14 +02007 *
8 * Based on the Linux version which is:
9 * Copyright (C) 2012 Marvell
10 *
11 * Rami Rosen <rosenr@marvell.com>
12 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Stefan Roese19fc2ea2014-10-22 12:13:14 +020013 */
14
15#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070016#include <cpu_func.h>
Stefan Roesee3b9c982015-11-19 07:46:15 +010017#include <dm.h>
Stefan Roese19fc2ea2014-10-22 12:13:14 +020018#include <net.h>
19#include <netdev.h>
20#include <config.h>
21#include <malloc.h>
22#include <asm/io.h>
Simon Glass61b29b82020-02-03 07:36:15 -070023#include <dm/devres.h>
Masahiro Yamada1221ce42016-09-21 11:28:55 +090024#include <linux/errno.h>
Stefan Roese19fc2ea2014-10-22 12:13:14 +020025#include <phy.h>
26#include <miiphy.h>
27#include <watchdog.h>
28#include <asm/arch/cpu.h>
29#include <asm/arch/soc.h>
30#include <linux/compat.h>
31#include <linux/mbus.h>
Aditya Prayoga18bfc8f2018-12-05 00:39:23 +080032#include <asm-generic/gpio.h>
Stefan Roese19fc2ea2014-10-22 12:13:14 +020033
Stefan Roesee3b9c982015-11-19 07:46:15 +010034DECLARE_GLOBAL_DATA_PTR;
35
Stefan Roese19fc2ea2014-10-22 12:13:14 +020036#if !defined(CONFIG_PHYLIB)
37# error Marvell mvneta requires PHYLIB
38#endif
39
Stefan Roese19fc2ea2014-10-22 12:13:14 +020040#define CONFIG_NR_CPUS 1
Stefan Roese19fc2ea2014-10-22 12:13:14 +020041#define ETH_HLEN 14 /* Total octets in header */
42
43/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
44#define WRAP (2 + ETH_HLEN + 4 + 32)
45#define MTU 1500
46#define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
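/*
 * Worked example (illustrative; ARCH_DMA_MINALIGN is per-architecture, e.g.
 * 64 on ARMv7): MTU + WRAP = 1500 + (2 + 14 + 4 + 32) = 1552, so
 * RX_BUFFER_SIZE = ALIGN(1552, 64) = 1600 bytes per receive buffer.
 */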
47
48#define MVNETA_SMI_TIMEOUT 10000
49
50/* Registers */
51#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
52#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
53#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
54#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
55#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
56#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
57#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
58#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
59#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
60#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
61#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
62#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
63#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
64#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
65#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
66#define MVNETA_PORT_RX_RESET 0x1cc0
67#define MVNETA_PORT_RX_DMA_RESET BIT(0)
68#define MVNETA_PHY_ADDR 0x2000
69#define MVNETA_PHY_ADDR_MASK 0x1f
70#define MVNETA_SMI 0x2004
71#define MVNETA_PHY_REG_MASK 0x1f
72/* SMI register fields */
73#define MVNETA_SMI_DATA_OFFS 0 /* Data */
74#define MVNETA_SMI_DATA_MASK (0xffff << MVNETA_SMI_DATA_OFFS)
75#define MVNETA_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
76#define MVNETA_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/
77#define MVNETA_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
78#define MVNETA_SMI_OPCODE_READ (1 << MVNETA_SMI_OPCODE_OFFS)
79#define MVNETA_SMI_READ_VALID (1 << 27) /* Read Valid */
80#define MVNETA_SMI_BUSY (1 << 28) /* Busy */
81#define MVNETA_MBUS_RETRY 0x2010
82#define MVNETA_UNIT_INTR_CAUSE 0x2080
83#define MVNETA_UNIT_CONTROL 0x20B0
84#define MVNETA_PHY_POLLING_ENABLE BIT(1)
85#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
86#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
87#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
Stefan Roese544eefe2016-05-19 17:46:36 +020088#define MVNETA_WIN_SIZE_MASK (0xffff0000)
Stefan Roese19fc2ea2014-10-22 12:13:14 +020089#define MVNETA_BASE_ADDR_ENABLE 0x2290
Stefan Roese544eefe2016-05-19 17:46:36 +020090#define MVNETA_BASE_ADDR_ENABLE_BIT 0x1
91#define MVNETA_PORT_ACCESS_PROTECT 0x2294
92#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW 0x3
Stefan Roese19fc2ea2014-10-22 12:13:14 +020093#define MVNETA_PORT_CONFIG 0x2400
94#define MVNETA_UNI_PROMISC_MODE BIT(0)
95#define MVNETA_DEF_RXQ(q) ((q) << 1)
96#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
97#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
98#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
99#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
100#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
101#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
102#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
103 MVNETA_DEF_RXQ_ARP(q) | \
104 MVNETA_DEF_RXQ_TCP(q) | \
105 MVNETA_DEF_RXQ_UDP(q) | \
106 MVNETA_DEF_RXQ_BPDU(q) | \
107 MVNETA_TX_UNSET_ERR_SUM | \
108 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
109#define MVNETA_PORT_CONFIG_EXTEND 0x2404
110#define MVNETA_MAC_ADDR_LOW 0x2414
111#define MVNETA_MAC_ADDR_HIGH 0x2418
112#define MVNETA_SDMA_CONFIG 0x241c
113#define MVNETA_SDMA_BRST_SIZE_16 4
114#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
115#define MVNETA_RX_NO_DATA_SWAP BIT(4)
116#define MVNETA_TX_NO_DATA_SWAP BIT(5)
117#define MVNETA_DESC_SWAP BIT(6)
118#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
119#define MVNETA_PORT_STATUS 0x2444
120#define MVNETA_TX_IN_PRGRS BIT(1)
121#define MVNETA_TX_FIFO_EMPTY BIT(8)
122#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
123#define MVNETA_SERDES_CFG 0x24A0
124#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
125#define MVNETA_QSGMII_SERDES_PROTO 0x0667
126#define MVNETA_TYPE_PRIO 0x24bc
127#define MVNETA_FORCE_UNI BIT(21)
128#define MVNETA_TXQ_CMD_1 0x24e4
129#define MVNETA_TXQ_CMD 0x2448
130#define MVNETA_TXQ_DISABLE_SHIFT 8
131#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
132#define MVNETA_ACC_MODE 0x2500
133#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
134#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
135#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
136#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
137
138/* Exception Interrupt Port/Queue Cause register */
139
140#define MVNETA_INTR_NEW_CAUSE 0x25a0
141#define MVNETA_INTR_NEW_MASK 0x25a4
142
143/* bits 0..7 = TXQ SENT, one bit per queue.
144 * bits 8..15 = RXQ OCCUP, one bit per queue.
145 * bits 16..23 = RXQ FREE, one bit per queue.
146 * bit 29 = OLD_REG_SUM, see old reg ?
147 * bit 30 = TX_ERR_SUM, one bit for 4 ports
148 * bit 31 = MISC_SUM, one bit for 4 ports
149 */
150#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
151#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
152#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
153#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
154
155#define MVNETA_INTR_OLD_CAUSE 0x25a8
156#define MVNETA_INTR_OLD_MASK 0x25ac
157
158/* Data Path Port/Queue Cause Register */
159#define MVNETA_INTR_MISC_CAUSE 0x25b0
160#define MVNETA_INTR_MISC_MASK 0x25b4
161#define MVNETA_INTR_ENABLE 0x25b8
162
163#define MVNETA_RXQ_CMD 0x2680
164#define MVNETA_RXQ_DISABLE_SHIFT 8
165#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
166#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
167#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
168#define MVNETA_GMAC_CTRL_0 0x2c00
169#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
170#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
171#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
172#define MVNETA_GMAC_CTRL_2 0x2c08
173#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
174#define MVNETA_GMAC2_PORT_RGMII BIT(4)
175#define MVNETA_GMAC2_PORT_RESET BIT(6)
176#define MVNETA_GMAC_STATUS 0x2c10
177#define MVNETA_GMAC_LINK_UP BIT(0)
178#define MVNETA_GMAC_SPEED_1000 BIT(1)
179#define MVNETA_GMAC_SPEED_100 BIT(2)
180#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
181#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
182#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
183#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
184#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
185#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
186#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
187#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +0200188#define MVNETA_GMAC_FORCE_LINK_UP (BIT(0) | BIT(1))
189#define MVNETA_GMAC_IB_BYPASS_AN_EN BIT(3)
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200190#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
191#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
192#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +0200193#define MVNETA_GMAC_SET_FC_EN BIT(8)
194#define MVNETA_GMAC_ADVERT_FC_EN BIT(9)
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200195#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
196#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +0200197#define MVNETA_GMAC_SAMPLE_TX_CFG_EN BIT(15)
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200198#define MVNETA_MIB_COUNTERS_BASE 0x3080
199#define MVNETA_MIB_LATE_COLLISION 0x7c
200#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
201#define MVNETA_DA_FILT_OTH_MCAST 0x3500
202#define MVNETA_DA_FILT_UCAST_BASE 0x3600
203#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
204#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
205#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
206#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
207#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
208#define MVNETA_TXQ_DEC_SENT_SHIFT 16
209#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
210#define MVNETA_TXQ_SENT_DESC_SHIFT 16
211#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
212#define MVNETA_PORT_TX_RESET 0x3cf0
213#define MVNETA_PORT_TX_DMA_RESET BIT(0)
214#define MVNETA_TX_MTU 0x3e0c
215#define MVNETA_TX_TOKEN_SIZE 0x3e14
216#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
217#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
218#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
219
220/* Descriptor ring Macros */
221#define MVNETA_QUEUE_NEXT_DESC(q, index) \
222 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
223
224/* Various constants */
225
226/* Coalescing */
227#define MVNETA_TXDONE_COAL_PKTS 16
228#define MVNETA_RX_COAL_PKTS 32
229#define MVNETA_RX_COAL_USEC 100
230
 231/* The two-byte Marvell header. It either contains a special value used
 232 * by Marvell switches when a specific hardware mode is enabled (not
 233 * supported by this driver) or is filled with zeroes automatically on
 234 * the RX side. Since those two bytes sit at the front of the Ethernet
 235 * header, they leave the IP header aligned on a 4-byte boundary
 236 * automatically: the hardware skips those two bytes on its
 237 * own.
 238 */
239#define MVNETA_MH_SIZE 2
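/*
 * Illustration, derived from the comment above: the 2-byte Marvell header
 * plus the 14-byte Ethernet header place the IP header at offset
 * 2 + 14 = 16 from the buffer start, i.e. on a 4-byte boundary, without
 * any extra padding inserted by software.
 */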
240
241#define MVNETA_VLAN_TAG_LEN 4
242
243#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
244#define MVNETA_TX_CSUM_MAX_SIZE 9800
245#define MVNETA_ACC_MODE_EXT 1
246
247/* Timeout constants */
248#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
249#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
250#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
251
252#define MVNETA_TX_MTU_MAX 0x3ffff
253
254/* Max number of Rx descriptors */
255#define MVNETA_MAX_RXD 16
256
257/* Max number of Tx descriptors */
258#define MVNETA_MAX_TXD 16
259
260/* descriptor aligned size */
261#define MVNETA_DESC_ALIGNED_SIZE 32
262
263struct mvneta_port {
264 void __iomem *base;
265 struct mvneta_rx_queue *rxqs;
266 struct mvneta_tx_queue *txqs;
267
268 u8 mcast_count[256];
269 u16 tx_ring_size;
270 u16 rx_ring_size;
271
272 phy_interface_t phy_interface;
273 unsigned int link;
274 unsigned int duplex;
275 unsigned int speed;
276
277 int init;
278 int phyaddr;
279 struct phy_device *phydev;
Simon Glassbcee8d62019-12-06 21:41:35 -0700280#if CONFIG_IS_ENABLED(DM_GPIO)
Aditya Prayoga18bfc8f2018-12-05 00:39:23 +0800281 struct gpio_desc phy_reset_gpio;
282#endif
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200283 struct mii_dev *bus;
284};
285
286/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
287 * layout of the transmit and reception DMA descriptors, and their
288 * layout is therefore defined by the hardware design
289 */
290
291#define MVNETA_TX_L3_OFF_SHIFT 0
292#define MVNETA_TX_IP_HLEN_SHIFT 8
293#define MVNETA_TX_L4_UDP BIT(16)
294#define MVNETA_TX_L3_IP6 BIT(17)
295#define MVNETA_TXD_IP_CSUM BIT(18)
296#define MVNETA_TXD_Z_PAD BIT(19)
297#define MVNETA_TXD_L_DESC BIT(20)
298#define MVNETA_TXD_F_DESC BIT(21)
299#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
300 MVNETA_TXD_L_DESC | \
301 MVNETA_TXD_F_DESC)
302#define MVNETA_TX_L4_CSUM_FULL BIT(30)
303#define MVNETA_TX_L4_CSUM_NOT BIT(31)
304
305#define MVNETA_RXD_ERR_CRC 0x0
306#define MVNETA_RXD_ERR_SUMMARY BIT(16)
307#define MVNETA_RXD_ERR_OVERRUN BIT(17)
308#define MVNETA_RXD_ERR_LEN BIT(18)
309#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
310#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
311#define MVNETA_RXD_L3_IP4 BIT(25)
312#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
313#define MVNETA_RXD_L4_CSUM_OK BIT(30)
314
315struct mvneta_tx_desc {
316 u32 command; /* Options used by HW for packet transmitting.*/
 317 u16 reserved1; /* csum_l4 (for future use) */
318 u16 data_size; /* Data size of transmitted packet in bytes */
319 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
320 u32 reserved2; /* hw_cmd - (for future use, PMT) */
321 u32 reserved3[4]; /* Reserved - (for future use) */
322};
323
324struct mvneta_rx_desc {
325 u32 status; /* Info about received packet */
326 u16 reserved1; /* pnc_info - (for future use, PnC) */
327 u16 data_size; /* Size of received packet in bytes */
328
329 u32 buf_phys_addr; /* Physical address of the buffer */
330 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
331
332 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
333 u16 reserved3; /* prefetch_cmd, for future use */
334 u16 reserved4; /* csum_l4 - (for future use, PnC) */
335
336 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
337 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
338};
339
340struct mvneta_tx_queue {
341 /* Number of this TX queue, in the range 0-7 */
342 u8 id;
343
344 /* Number of TX DMA descriptors in the descriptor ring */
345 int size;
346
347 /* Index of last TX DMA descriptor that was inserted */
348 int txq_put_index;
349
350 /* Index of the TX DMA descriptor to be cleaned up */
351 int txq_get_index;
352
353 /* Virtual address of the TX DMA descriptors array */
354 struct mvneta_tx_desc *descs;
355
356 /* DMA address of the TX DMA descriptors array */
357 dma_addr_t descs_phys;
358
359 /* Index of the last TX DMA descriptor */
360 int last_desc;
361
362 /* Index of the next TX DMA descriptor to process */
363 int next_desc_to_proc;
364};
365
366struct mvneta_rx_queue {
367 /* rx queue number, in the range 0-7 */
368 u8 id;
369
370 /* num of rx descriptors in the rx descriptor ring */
371 int size;
372
373 /* Virtual address of the RX DMA descriptors array */
374 struct mvneta_rx_desc *descs;
375
376 /* DMA address of the RX DMA descriptors array */
377 dma_addr_t descs_phys;
378
379 /* Index of the last RX DMA descriptor */
380 int last_desc;
381
382 /* Index of the next RX DMA descriptor to process */
383 int next_desc_to_proc;
384};
385
386/* U-Boot doesn't use the queues, so set the number to 1 */
387static int rxq_number = 1;
388static int txq_number = 1;
389static int rxq_def;
390
391struct buffer_location {
392 struct mvneta_tx_desc *tx_descs;
393 struct mvneta_rx_desc *rx_descs;
394 u32 rx_buffers;
395};
396
397/*
398 * All 4 interfaces use the same global buffer, since only one interface
399 * can be enabled at once
400 */
401static struct buffer_location buffer_loc;
402
 403/*
 404 * Page table entries are set to 1MB, or multiples of 1MB (never less
 405 * than 1MB). The driver uses only a few descriptors, so 1MB is enough.
 406 */
407#define BD_SPACE (1 << 20)
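/*
 * Rough sketch of how mvneta_probe() carves this area up (offsets are
 * illustrative and assume 32-byte descriptors and ARCH_DMA_MINALIGN = 64):
 *
 *   bd_space + 0x000: 16 TX descriptors (16 * 32 = 512 bytes, rounded up)
 *   bd_space + 0x200: 16 RX descriptors (16 * 32 = 512 bytes, rounded up)
 *   bd_space + 0x400: 16 RX buffers of RX_BUFFER_SIZE bytes each
 *
 * which fits comfortably inside the 1MB BD_SPACE region.
 */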
408
Konstantin Porotchkin976feda2017-02-16 13:52:27 +0200409/*
 410 * Dummy implementation that can be overridden by a board-specific
 411 * function
412 */
413__weak int board_network_enable(struct mii_dev *bus)
414{
415 return 0;
416}
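/*
 * Sketch of a board-specific override (this would live in a board file, not
 * in this driver; the PHY address, register and value are placeholders):
 */
#if 0	/* example only */
int board_network_enable(struct mii_dev *bus)
{
	/* e.g. release a board-level PHY reset or tweak a PHY register */
	return bus->write(bus, 0 /* PHY addr */, MDIO_DEVAD_NONE,
			  22 /* reg */, 0 /* value */);
}
#endif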
417
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200418/* Utility/helper methods */
419
420/* Write helper method */
421static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
422{
423 writel(data, pp->base + offset);
424}
425
426/* Read helper method */
427static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
428{
429 return readl(pp->base + offset);
430}
431
432/* Clear all MIB counters */
433static void mvneta_mib_counters_clear(struct mvneta_port *pp)
434{
435 int i;
436
437 /* Perform dummy reads from MIB counters */
438 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
439 mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
440}
441
442/* Rx descriptors helper methods */
443
444/* Checks whether the RX descriptor having this status is both the first
445 * and the last descriptor for the RX packet. Each RX packet is currently
446 * received through a single RX descriptor, so not having each RX
447 * descriptor with its first and last bits set is an error
448 */
449static int mvneta_rxq_desc_is_first_last(u32 status)
450{
451 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
452 MVNETA_RXD_FIRST_LAST_DESC;
453}
454
455/* Add number of descriptors ready to receive new packets */
456static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
457 struct mvneta_rx_queue *rxq,
458 int ndescs)
459{
460 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
461 * be added at once
462 */
463 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
464 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
465 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
466 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
467 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
468 }
469
470 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
471 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
472}
473
474/* Get number of RX descriptors occupied by received packets */
475static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
476 struct mvneta_rx_queue *rxq)
477{
478 u32 val;
479
480 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
481 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
482}
483
484/* Update num of rx desc called upon return from rx path or
485 * from mvneta_rxq_drop_pkts().
486 */
487static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
488 struct mvneta_rx_queue *rxq,
489 int rx_done, int rx_filled)
490{
491 u32 val;
492
493 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
494 val = rx_done |
495 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
496 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
497 return;
498 }
499
500 /* Only 255 descriptors can be added at once */
501 while ((rx_done > 0) || (rx_filled > 0)) {
502 if (rx_done <= 0xff) {
503 val = rx_done;
504 rx_done = 0;
505 } else {
506 val = 0xff;
507 rx_done -= 0xff;
508 }
509 if (rx_filled <= 0xff) {
510 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
511 rx_filled = 0;
512 } else {
513 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
514 rx_filled -= 0xff;
515 }
516 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
517 }
518}
519
520/* Get pointer to next RX descriptor to be processed by SW */
521static struct mvneta_rx_desc *
522mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
523{
524 int rx_desc = rxq->next_desc_to_proc;
525
526 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
527 return rxq->descs + rx_desc;
528}
529
530/* Tx descriptors helper methods */
531
532/* Update HW with number of TX descriptors to be sent */
533static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
534 struct mvneta_tx_queue *txq,
535 int pend_desc)
536{
537 u32 val;
538
 539 /* Only 255 descriptors can be added at once; assume the caller
Heinrich Schuchardte4691562017-08-29 18:44:37 +0200540 * processes TX descriptors in quanta of less than 256
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200541 */
542 val = pend_desc;
543 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
544}
545
546/* Get pointer to next TX descriptor to be processed (send) by HW */
547static struct mvneta_tx_desc *
548mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
549{
550 int tx_desc = txq->next_desc_to_proc;
551
552 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
553 return txq->descs + tx_desc;
554}
555
556/* Set rxq buf size */
557static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
558 struct mvneta_rx_queue *rxq,
559 int buf_size)
560{
561 u32 val;
562
563 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
564
565 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
566 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
567
568 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
569}
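/*
 * Illustration: the buffer size field holds the size in units of 8 bytes.
 * Assuming ARCH_DMA_MINALIGN = 64 (RX_BUFFER_SIZE = 1600, see above), the
 * value programmed into bits 19..31 is 1600 >> 3 = 200.
 */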
570
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +0200571static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
572{
573 /* phy_addr is set to invalid value for fixed link */
574 return pp->phyaddr > PHY_MAX_ADDR;
575}
576
577
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200578/* Start the Ethernet port RX and TX activity */
579static void mvneta_port_up(struct mvneta_port *pp)
580{
581 int queue;
582 u32 q_map;
583
584 /* Enable all initialized TXs. */
585 mvneta_mib_counters_clear(pp);
586 q_map = 0;
587 for (queue = 0; queue < txq_number; queue++) {
588 struct mvneta_tx_queue *txq = &pp->txqs[queue];
589 if (txq->descs != NULL)
590 q_map |= (1 << queue);
591 }
592 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
593
594 /* Enable all initialized RXQs. */
595 q_map = 0;
596 for (queue = 0; queue < rxq_number; queue++) {
597 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
598 if (rxq->descs != NULL)
599 q_map |= (1 << queue);
600 }
601 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
602}
603
604/* Stop the Ethernet port activity */
605static void mvneta_port_down(struct mvneta_port *pp)
606{
607 u32 val;
608 int count;
609
610 /* Stop Rx port activity. Check port Rx activity. */
611 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
612
613 /* Issue stop command for active channels only */
614 if (val != 0)
615 mvreg_write(pp, MVNETA_RXQ_CMD,
616 val << MVNETA_RXQ_DISABLE_SHIFT);
617
618 /* Wait for all Rx activity to terminate. */
619 count = 0;
620 do {
621 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
622 netdev_warn(pp->dev,
 623 "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
624 val);
625 break;
626 }
627 mdelay(1);
628
629 val = mvreg_read(pp, MVNETA_RXQ_CMD);
630 } while (val & 0xff);
631
632 /* Stop Tx port activity. Check port Tx activity. Issue stop
633 * command for active channels only
634 */
635 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
636
637 if (val != 0)
638 mvreg_write(pp, MVNETA_TXQ_CMD,
639 (val << MVNETA_TXQ_DISABLE_SHIFT));
640
641 /* Wait for all Tx activity to terminate. */
642 count = 0;
643 do {
644 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
645 netdev_warn(pp->dev,
646 "TIMEOUT for TX stopped status=0x%08x\n",
647 val);
648 break;
649 }
650 mdelay(1);
651
652 /* Check TX Command reg that all Txqs are stopped */
653 val = mvreg_read(pp, MVNETA_TXQ_CMD);
654
655 } while (val & 0xff);
656
657 /* Double check to verify that TX FIFO is empty */
658 count = 0;
659 do {
660 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
661 netdev_warn(pp->dev,
 662 "TX FIFO empty timeout status=0x%08x\n",
663 val);
664 break;
665 }
666 mdelay(1);
667
668 val = mvreg_read(pp, MVNETA_PORT_STATUS);
669 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
670 (val & MVNETA_TX_IN_PRGRS));
671
672 udelay(200);
673}
674
675/* Enable the port by setting the port enable bit of the MAC control register */
676static void mvneta_port_enable(struct mvneta_port *pp)
677{
678 u32 val;
679
680 /* Enable port */
681 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
682 val |= MVNETA_GMAC0_PORT_ENABLE;
683 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
684}
685
 686/* Disable the port and wait for about 200 usec before returning */
687static void mvneta_port_disable(struct mvneta_port *pp)
688{
689 u32 val;
690
691 /* Reset the Enable bit in the Serial Control Register */
692 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
693 val &= ~MVNETA_GMAC0_PORT_ENABLE;
694 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
695
696 udelay(200);
697}
698
699/* Multicast tables methods */
700
701/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
702static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
703{
704 int offset;
705 u32 val;
706
707 if (queue == -1) {
708 val = 0;
709 } else {
710 val = 0x1 | (queue << 1);
711 val |= (val << 24) | (val << 16) | (val << 8);
712 }
713
714 for (offset = 0; offset <= 0xc; offset += 4)
715 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
716}
717
718/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
719static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
720{
721 int offset;
722 u32 val;
723
724 if (queue == -1) {
725 val = 0;
726 } else {
727 val = 0x1 | (queue << 1);
728 val |= (val << 24) | (val << 16) | (val << 8);
729 }
730
731 for (offset = 0; offset <= 0xfc; offset += 4)
732 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
733}
734
735/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
736static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
737{
738 int offset;
739 u32 val;
740
741 if (queue == -1) {
742 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
743 val = 0;
744 } else {
745 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
746 val = 0x1 | (queue << 1);
747 val |= (val << 24) | (val << 16) | (val << 8);
748 }
749
750 for (offset = 0; offset <= 0xfc; offset += 4)
751 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
752}
753
754/* This method sets defaults to the NETA port:
755 * Clears interrupt Cause and Mask registers.
756 * Clears all MAC tables.
757 * Sets defaults to all registers.
758 * Resets RX and TX descriptor rings.
759 * Resets PHY.
760 * This method can be called after mvneta_port_down() to return the port
761 * settings to defaults.
762 */
763static void mvneta_defaults_set(struct mvneta_port *pp)
764{
765 int cpu;
766 int queue;
767 u32 val;
768
769 /* Clear all Cause registers */
770 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
771 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
772 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
773
774 /* Mask all interrupts */
775 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
776 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
777 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
778 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
779
780 /* Enable MBUS Retry bit16 */
781 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
782
783 /* Set CPU queue access map - all CPUs have access to all RX
784 * queues and to all TX queues
785 */
786 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
787 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
788 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
789 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
790
791 /* Reset RX and TX DMAs */
792 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
793 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
794
795 /* Disable Legacy WRR, Disable EJP, Release from reset */
796 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
797 for (queue = 0; queue < txq_number; queue++) {
798 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
799 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
800 }
801
802 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
803 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
804
805 /* Set Port Acceleration Mode */
806 val = MVNETA_ACC_MODE_EXT;
807 mvreg_write(pp, MVNETA_ACC_MODE, val);
808
809 /* Update val of portCfg register accordingly with all RxQueue types */
810 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
811 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
812
813 val = 0;
814 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
815 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
816
817 /* Build PORT_SDMA_CONFIG_REG */
818 val = 0;
819
820 /* Default burst size */
821 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
822 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
823 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
824
825 /* Assign port SDMA configuration */
826 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
827
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +0200828 /* Enable PHY polling in hardware if not in fixed-link mode */
829 if (!mvneta_port_is_fixed_link(pp)) {
830 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
831 val |= MVNETA_PHY_POLLING_ENABLE;
832 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
833 }
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200834
835 mvneta_set_ucast_table(pp, -1);
836 mvneta_set_special_mcast_table(pp, -1);
837 mvneta_set_other_mcast_table(pp, -1);
838}
839
840/* Set unicast address */
841static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
842 int queue)
843{
844 unsigned int unicast_reg;
845 unsigned int tbl_offset;
846 unsigned int reg_offset;
847
848 /* Locate the Unicast table entry */
849 last_nibble = (0xf & last_nibble);
850
851 /* offset from unicast tbl base */
852 tbl_offset = (last_nibble / 4) * 4;
853
854 /* offset within the above reg */
855 reg_offset = last_nibble % 4;
856
857 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
858
859 if (queue == -1) {
860 /* Clear accepts frame bit at specified unicast DA tbl entry */
861 unicast_reg &= ~(0xff << (8 * reg_offset));
862 } else {
863 unicast_reg &= ~(0xff << (8 * reg_offset));
864 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
865 }
866
867 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
868}
869
870/* Set mac address */
871static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
872 int queue)
873{
874 unsigned int mac_h;
875 unsigned int mac_l;
876
877 if (queue != -1) {
878 mac_l = (addr[4] << 8) | (addr[5]);
879 mac_h = (addr[0] << 24) | (addr[1] << 16) |
880 (addr[2] << 8) | (addr[3] << 0);
881
882 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
883 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
884 }
885
886 /* Accept frames of this address */
887 mvneta_set_ucast_addr(pp, addr[5], queue);
888}
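/*
 * Illustration: for the (made-up) MAC address 00:50:43:12:34:56 the
 * registers end up as mac_h = 0x00504312 (bytes 0..3) and
 * mac_l = 0x3456 (bytes 4..5).
 */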
889
Matt Pelland0a85f022018-03-27 13:18:25 -0400890static int mvneta_write_hwaddr(struct udevice *dev)
891{
892 mvneta_mac_addr_set(dev_get_priv(dev),
893 ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr,
894 rxq_def);
895
896 return 0;
897}
898
Stefan Roese19fc2ea2014-10-22 12:13:14 +0200899/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
900static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
901 u32 phys_addr, u32 cookie)
902{
903 rx_desc->buf_cookie = cookie;
904 rx_desc->buf_phys_addr = phys_addr;
905}
906
907/* Decrement sent descriptors counter */
908static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
909 struct mvneta_tx_queue *txq,
910 int sent_desc)
911{
912 u32 val;
913
914 /* Only 255 TX descriptors can be updated at once */
915 while (sent_desc > 0xff) {
916 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
917 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
918 sent_desc = sent_desc - 0xff;
919 }
920
921 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
922 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
923}
924
925/* Get number of TX descriptors already sent by HW */
926static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
927 struct mvneta_tx_queue *txq)
928{
929 u32 val;
930 int sent_desc;
931
932 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
933 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
934 MVNETA_TXQ_SENT_DESC_SHIFT;
935
936 return sent_desc;
937}
938
939/* Display more error info */
940static void mvneta_rx_error(struct mvneta_port *pp,
941 struct mvneta_rx_desc *rx_desc)
942{
943 u32 status = rx_desc->status;
944
945 if (!mvneta_rxq_desc_is_first_last(status)) {
946 netdev_err(pp->dev,
947 "bad rx status %08x (buffer oversize), size=%d\n",
948 status, rx_desc->data_size);
949 return;
950 }
951
952 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
953 case MVNETA_RXD_ERR_CRC:
954 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
955 status, rx_desc->data_size);
956 break;
957 case MVNETA_RXD_ERR_OVERRUN:
958 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
959 status, rx_desc->data_size);
960 break;
961 case MVNETA_RXD_ERR_LEN:
962 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
963 status, rx_desc->data_size);
964 break;
965 case MVNETA_RXD_ERR_RESOURCE:
966 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
967 status, rx_desc->data_size);
968 break;
969 }
970}
971
972static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
973 int rxq)
974{
975 return &pp->rxqs[rxq];
976}
977
978
979/* Drop packets received by the RXQ and free buffers */
980static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
981 struct mvneta_rx_queue *rxq)
982{
983 int rx_done;
984
985 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
986 if (rx_done)
987 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
988}
989
990/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
991static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
992 int num)
993{
994 int i;
995
996 for (i = 0; i < num; i++) {
997 u32 addr;
998
999 /* U-Boot special: Fill in the rx buffer addresses */
1000 addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
1001 mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
1002 }
1003
1004 /* Add this number of RX descriptors as non occupied (ready to
1005 * get packets)
1006 */
1007 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1008
1009 return 0;
1010}
1011
1012/* Rx/Tx queue initialization/cleanup methods */
1013
1014/* Create a specified RX queue */
1015static int mvneta_rxq_init(struct mvneta_port *pp,
1016 struct mvneta_rx_queue *rxq)
1017
1018{
1019 rxq->size = pp->rx_ring_size;
1020
1021 /* Allocate memory for RX descriptors */
1022 rxq->descs_phys = (dma_addr_t)rxq->descs;
1023 if (rxq->descs == NULL)
1024 return -ENOMEM;
1025
Jon Nettleton199b27b2018-05-30 08:52:29 +03001026 WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));
1027
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001028 rxq->last_desc = rxq->size - 1;
1029
1030 /* Set Rx descriptors queue starting address */
1031 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
1032 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
1033
1034 /* Fill RXQ with buffers from RX pool */
1035 mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
1036 mvneta_rxq_fill(pp, rxq, rxq->size);
1037
1038 return 0;
1039}
1040
1041/* Cleanup Rx queue */
1042static void mvneta_rxq_deinit(struct mvneta_port *pp,
1043 struct mvneta_rx_queue *rxq)
1044{
1045 mvneta_rxq_drop_pkts(pp, rxq);
1046
1047 rxq->descs = NULL;
1048 rxq->last_desc = 0;
1049 rxq->next_desc_to_proc = 0;
1050 rxq->descs_phys = 0;
1051}
1052
1053/* Create and initialize a tx queue */
1054static int mvneta_txq_init(struct mvneta_port *pp,
1055 struct mvneta_tx_queue *txq)
1056{
1057 txq->size = pp->tx_ring_size;
1058
1059 /* Allocate memory for TX descriptors */
Stefan Roese3cbc11d2016-05-19 18:09:17 +02001060 txq->descs_phys = (dma_addr_t)txq->descs;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001061 if (txq->descs == NULL)
1062 return -ENOMEM;
1063
Jon Nettleton199b27b2018-05-30 08:52:29 +03001064 WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));
1065
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001066 txq->last_desc = txq->size - 1;
1067
1068 /* Set maximum bandwidth for enabled TXQs */
1069 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
1070 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
1071
1072 /* Set Tx descriptors queue starting address */
1073 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
1074 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
1075
1076 return 0;
1077}
1078
 1079/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
1080static void mvneta_txq_deinit(struct mvneta_port *pp,
1081 struct mvneta_tx_queue *txq)
1082{
1083 txq->descs = NULL;
1084 txq->last_desc = 0;
1085 txq->next_desc_to_proc = 0;
1086 txq->descs_phys = 0;
1087
1088 /* Set minimum bandwidth for disabled TXQs */
1089 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
1090 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
1091
1092 /* Set Tx descriptors queue starting address and size */
1093 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
1094 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
1095}
1096
1097/* Cleanup all Tx queues */
1098static void mvneta_cleanup_txqs(struct mvneta_port *pp)
1099{
1100 int queue;
1101
1102 for (queue = 0; queue < txq_number; queue++)
1103 mvneta_txq_deinit(pp, &pp->txqs[queue]);
1104}
1105
1106/* Cleanup all Rx queues */
1107static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
1108{
1109 int queue;
1110
1111 for (queue = 0; queue < rxq_number; queue++)
1112 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
1113}
1114
1115
1116/* Init all Rx queues */
1117static int mvneta_setup_rxqs(struct mvneta_port *pp)
1118{
1119 int queue;
1120
1121 for (queue = 0; queue < rxq_number; queue++) {
1122 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
1123 if (err) {
1124 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
1125 __func__, queue);
1126 mvneta_cleanup_rxqs(pp);
1127 return err;
1128 }
1129 }
1130
1131 return 0;
1132}
1133
1134/* Init all tx queues */
1135static int mvneta_setup_txqs(struct mvneta_port *pp)
1136{
1137 int queue;
1138
1139 for (queue = 0; queue < txq_number; queue++) {
1140 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
1141 if (err) {
1142 netdev_err(pp->dev, "%s: can't create txq=%d\n",
1143 __func__, queue);
1144 mvneta_cleanup_txqs(pp);
1145 return err;
1146 }
1147 }
1148
1149 return 0;
1150}
1151
1152static void mvneta_start_dev(struct mvneta_port *pp)
1153{
1154 /* start the Rx/Tx activity */
1155 mvneta_port_enable(pp);
1156}
1157
Stefan Roesee3b9c982015-11-19 07:46:15 +01001158static void mvneta_adjust_link(struct udevice *dev)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001159{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001160 struct mvneta_port *pp = dev_get_priv(dev);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001161 struct phy_device *phydev = pp->phydev;
1162 int status_change = 0;
1163
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001164 if (mvneta_port_is_fixed_link(pp)) {
1165 debug("Using fixed link, skip link adjust\n");
1166 return;
1167 }
1168
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001169 if (phydev->link) {
1170 if ((pp->speed != phydev->speed) ||
1171 (pp->duplex != phydev->duplex)) {
1172 u32 val;
1173
1174 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1175 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
1176 MVNETA_GMAC_CONFIG_GMII_SPEED |
1177 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
1178 MVNETA_GMAC_AN_SPEED_EN |
1179 MVNETA_GMAC_AN_DUPLEX_EN);
1180
1181 if (phydev->duplex)
1182 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
1183
1184 if (phydev->speed == SPEED_1000)
1185 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
1186 else
1187 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
1188
1189 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1190
1191 pp->duplex = phydev->duplex;
1192 pp->speed = phydev->speed;
1193 }
1194 }
1195
1196 if (phydev->link != pp->link) {
1197 if (!phydev->link) {
1198 pp->duplex = -1;
1199 pp->speed = 0;
1200 }
1201
1202 pp->link = phydev->link;
1203 status_change = 1;
1204 }
1205
1206 if (status_change) {
1207 if (phydev->link) {
1208 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1209 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
1210 MVNETA_GMAC_FORCE_LINK_DOWN);
1211 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1212 mvneta_port_up(pp);
1213 } else {
1214 mvneta_port_down(pp);
1215 }
1216 }
1217}
1218
Stefan Roesee3b9c982015-11-19 07:46:15 +01001219static int mvneta_open(struct udevice *dev)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001220{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001221 struct mvneta_port *pp = dev_get_priv(dev);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001222 int ret;
1223
1224 ret = mvneta_setup_rxqs(pp);
1225 if (ret)
1226 return ret;
1227
1228 ret = mvneta_setup_txqs(pp);
1229 if (ret)
1230 return ret;
1231
1232 mvneta_adjust_link(dev);
1233
1234 mvneta_start_dev(pp);
1235
1236 return 0;
1237}
1238
1239/* Initialize hw */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001240static int mvneta_init2(struct mvneta_port *pp)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001241{
1242 int queue;
1243
1244 /* Disable port */
1245 mvneta_port_disable(pp);
1246
1247 /* Set port default values */
1248 mvneta_defaults_set(pp);
1249
1250 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
1251 GFP_KERNEL);
1252 if (!pp->txqs)
1253 return -ENOMEM;
1254
1255 /* U-Boot special: use preallocated area */
1256 pp->txqs[0].descs = buffer_loc.tx_descs;
1257
1258 /* Initialize TX descriptor rings */
1259 for (queue = 0; queue < txq_number; queue++) {
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1261 txq->id = queue;
1262 txq->size = pp->tx_ring_size;
1263 }
1264
1265 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
1266 GFP_KERNEL);
1267 if (!pp->rxqs) {
1268 kfree(pp->txqs);
1269 return -ENOMEM;
1270 }
1271
1272 /* U-Boot special: use preallocated area */
1273 pp->rxqs[0].descs = buffer_loc.rx_descs;
1274
1275 /* Create Rx descriptor rings */
1276 for (queue = 0; queue < rxq_number; queue++) {
1277 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1278 rxq->id = queue;
1279 rxq->size = pp->rx_ring_size;
1280 }
1281
1282 return 0;
1283}
1284
1285/* platform glue : initialize decoding windows */
Stefan Roese544eefe2016-05-19 17:46:36 +02001286
 1287/*
 1288 * Unlike the Armada 380, the Armada 3700 has two layers of decode windows
 1289 * for the GBE: the first layer is the GbE address window inside the GBE
 1290 * unit, and the second layer is the fabric address window located in the
 1291 * NIC400 (South Fabric).
 1292 * To simplify the address decode configuration for the Armada 3700, bypass
 1293 * the first layer of the GBE decode window by setting the first window to 4GB.
 1294 */
1295static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
1296{
1297 /*
1298 * Set window size to 4GB, to bypass GBE address decode, leave the
1299 * work to MBUS decode window
1300 */
1301 mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);
1302
1303 /* Enable GBE address decode window 0 by set bit 0 to 0 */
1304 clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
1305 MVNETA_BASE_ADDR_ENABLE_BIT);
1306
1307 /* Set GBE address decode window 0 to full Access (read or write) */
1308 setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
1309 MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
1310}
1311
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001312static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
1313{
1314 const struct mbus_dram_target_info *dram;
1315 u32 win_enable;
1316 u32 win_protect;
1317 int i;
1318
1319 dram = mvebu_mbus_dram_info();
1320 for (i = 0; i < 6; i++) {
1321 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1322 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1323
1324 if (i < 4)
1325 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1326 }
1327
1328 win_enable = 0x3f;
1329 win_protect = 0;
1330
1331 for (i = 0; i < dram->num_cs; i++) {
1332 const struct mbus_dram_window *cs = dram->cs + i;
1333 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
1334 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
1335
1336 mvreg_write(pp, MVNETA_WIN_SIZE(i),
1337 (cs->size - 1) & 0xffff0000);
1338
1339 win_enable &= ~(1 << i);
1340 win_protect |= 3 << (2 * i);
1341 }
1342
1343 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1344}
1345
1346/* Power up the port */
1347static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
1348{
1349 u32 ctrl;
1350
1351 /* MAC Cause register should be cleared */
1352 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
1353
1354 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
1355
1356 /* Even though it might look weird, when we're configured in
1357 * SGMII or QSGMII mode, the RGMII bit needs to be set.
1358 */
1359 switch (phy_mode) {
1360 case PHY_INTERFACE_MODE_QSGMII:
1361 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
1362 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
1363 break;
1364 case PHY_INTERFACE_MODE_SGMII:
1365 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
1366 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
1367 break;
1368 case PHY_INTERFACE_MODE_RGMII:
1369 case PHY_INTERFACE_MODE_RGMII_ID:
1370 ctrl |= MVNETA_GMAC2_PORT_RGMII;
1371 break;
1372 default:
1373 return -EINVAL;
1374 }
1375
1376 /* Cancel Port Reset */
1377 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
1378 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
1379
1380 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
1381 MVNETA_GMAC2_PORT_RESET) != 0)
1382 continue;
1383
1384 return 0;
1385}
1386
1387/* Device initialization routine */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001388static int mvneta_init(struct udevice *dev)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001389{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001390 struct eth_pdata *pdata = dev_get_platdata(dev);
1391 struct mvneta_port *pp = dev_get_priv(dev);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001392 int err;
1393
1394 pp->tx_ring_size = MVNETA_MAX_TXD;
1395 pp->rx_ring_size = MVNETA_MAX_RXD;
1396
Stefan Roesee3b9c982015-11-19 07:46:15 +01001397 err = mvneta_init2(pp);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001398 if (err < 0) {
 1399 dev_err(dev, "can't init eth hal\n");
1400 return err;
1401 }
1402
Stefan Roesee3b9c982015-11-19 07:46:15 +01001403 mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001404
1405 err = mvneta_port_power_up(pp, pp->phy_interface);
1406 if (err < 0) {
 1407 dev_err(dev, "can't power up port\n");
1408 return err;
1409 }
1410
 1411 /* Call open() now as it needs to be done before running send() */
1412 mvneta_open(dev);
1413
1414 return 0;
1415}
1416
1417/* U-Boot only functions follow here */
1418
1419/* SMI / MDIO functions */
1420
1421static int smi_wait_ready(struct mvneta_port *pp)
1422{
1423 u32 timeout = MVNETA_SMI_TIMEOUT;
1424 u32 smi_reg;
1425
1426 /* wait till the SMI is not busy */
1427 do {
1428 /* read smi register */
1429 smi_reg = mvreg_read(pp, MVNETA_SMI);
1430 if (timeout-- == 0) {
1431 printf("Error: SMI busy timeout\n");
1432 return -EFAULT;
1433 }
1434 } while (smi_reg & MVNETA_SMI_BUSY);
1435
1436 return 0;
1437}
1438
1439/*
Stefan Roesee3b9c982015-11-19 07:46:15 +01001440 * mvneta_mdio_read - miiphy_read callback function.
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001441 *
1442 * Returns 16bit phy register value, or 0xffff on error
1443 */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001444static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001445{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001446 struct mvneta_port *pp = bus->priv;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001447 u32 smi_reg;
1448 u32 timeout;
1449
1450 /* check parameters */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001451 if (addr > MVNETA_PHY_ADDR_MASK) {
1452 printf("Error: Invalid PHY address %d\n", addr);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001453 return -EFAULT;
1454 }
1455
Stefan Roesee3b9c982015-11-19 07:46:15 +01001456 if (reg > MVNETA_PHY_REG_MASK) {
1457 printf("Err: Invalid register offset %d\n", reg);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001458 return -EFAULT;
1459 }
1460
1461 /* wait till the SMI is not busy */
1462 if (smi_wait_ready(pp) < 0)
1463 return -EFAULT;
1464
 1465 /* fill the phy address and register offset and read opcode */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001466 smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS)
1467 | (reg << MVNETA_SMI_REG_ADDR_OFFS)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001468 | MVNETA_SMI_OPCODE_READ;
1469
1470 /* write the smi register */
1471 mvreg_write(pp, MVNETA_SMI, smi_reg);
1472
Stefan Roesee3b9c982015-11-19 07:46:15 +01001473 /* wait till read value is ready */
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001474 timeout = MVNETA_SMI_TIMEOUT;
1475
1476 do {
1477 /* read smi register */
1478 smi_reg = mvreg_read(pp, MVNETA_SMI);
1479 if (timeout-- == 0) {
1480 printf("Err: SMI read ready timeout\n");
1481 return -EFAULT;
1482 }
1483 } while (!(smi_reg & MVNETA_SMI_READ_VALID));
1484
1485 /* Wait for the data to update in the SMI register */
1486 for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++)
1487 ;
1488
Stefan Roesee3b9c982015-11-19 07:46:15 +01001489 return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001490}
1491
1492/*
Stefan Roesee3b9c982015-11-19 07:46:15 +01001493 * mvneta_mdio_write - miiphy_write callback function.
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001494 *
1495 * Returns 0 if write succeed, -EINVAL on bad parameters
1496 * -ETIME on timeout
1497 */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001498static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
1499 u16 value)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001500{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001501 struct mvneta_port *pp = bus->priv;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001502 u32 smi_reg;
1503
1504 /* check parameters */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001505 if (addr > MVNETA_PHY_ADDR_MASK) {
1506 printf("Error: Invalid PHY address %d\n", addr);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001507 return -EFAULT;
1508 }
1509
Stefan Roesee3b9c982015-11-19 07:46:15 +01001510 if (reg > MVNETA_PHY_REG_MASK) {
1511 printf("Err: Invalid register offset %d\n", reg);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001512 return -EFAULT;
1513 }
1514
1515 /* wait till the SMI is not busy */
1516 if (smi_wait_ready(pp) < 0)
1517 return -EFAULT;
1518
1519 /* fill the phy addr and reg offset and write opcode and data */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001520 smi_reg = value << MVNETA_SMI_DATA_OFFS;
1521 smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS)
1522 | (reg << MVNETA_SMI_REG_ADDR_OFFS);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001523 smi_reg &= ~MVNETA_SMI_OPCODE_READ;
1524
1525 /* write the smi register */
1526 mvreg_write(pp, MVNETA_SMI, smi_reg);
1527
1528 return 0;
1529}
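/*
 * Sketch of how these callbacks are reached once mvneta_probe() has
 * registered the MDIO bus under the device name (the bus name and PHY
 * address below are illustrative):
 */
#if 0	/* example only */
static void example_read_phy_id(void)
{
	struct mii_dev *bus = miiphy_get_dev_by_name("neta@30000");

	if (bus)
		printf("PHY ID1: 0x%04x\n",
		       bus->read(bus, 0 /* PHY addr */, MDIO_DEVAD_NONE,
				 MII_PHYSID1));
}
#endif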
1530
Stefan Roesee3b9c982015-11-19 07:46:15 +01001531static int mvneta_start(struct udevice *dev)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001532{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001533 struct mvneta_port *pp = dev_get_priv(dev);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001534 struct phy_device *phydev;
1535
1536 mvneta_port_power_up(pp, pp->phy_interface);
1537
1538 if (!pp->init || pp->link == 0) {
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001539 if (mvneta_port_is_fixed_link(pp)) {
1540 u32 val;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001541
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001542 pp->init = 1;
1543 pp->link = 1;
1544 mvneta_init(dev);
1545
1546 val = MVNETA_GMAC_FORCE_LINK_UP |
1547 MVNETA_GMAC_IB_BYPASS_AN_EN |
1548 MVNETA_GMAC_SET_FC_EN |
1549 MVNETA_GMAC_ADVERT_FC_EN |
1550 MVNETA_GMAC_SAMPLE_TX_CFG_EN;
1551
1552 if (pp->duplex)
1553 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
1554
1555 if (pp->speed == SPEED_1000)
1556 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
1557 else if (pp->speed == SPEED_100)
1558 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
1559
1560 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1561 } else {
1562 /* Set phy address of the port */
1563 mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr);
1564
1565 phydev = phy_connect(pp->bus, pp->phyaddr, dev,
1566 pp->phy_interface);
Marek Behúncf2cf852018-04-24 17:21:29 +02001567 if (!phydev) {
1568 printf("phy_connect failed\n");
1569 return -ENODEV;
1570 }
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001571
1572 pp->phydev = phydev;
1573 phy_config(phydev);
1574 phy_startup(phydev);
1575 if (!phydev->link) {
1576 printf("%s: No link.\n", phydev->dev->name);
1577 return -1;
1578 }
1579
1580 /* Full init on first call */
1581 mvneta_init(dev);
1582 pp->init = 1;
1583 return 0;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001584 }
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001585 }
1586
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001587 /* Upon all following calls, this is enough */
1588 mvneta_port_up(pp);
1589 mvneta_port_enable(pp);
1590
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001591 return 0;
1592}
1593
Stefan Roesee3b9c982015-11-19 07:46:15 +01001594static int mvneta_send(struct udevice *dev, void *packet, int length)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001595{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001596 struct mvneta_port *pp = dev_get_priv(dev);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001597 struct mvneta_tx_queue *txq = &pp->txqs[0];
1598 struct mvneta_tx_desc *tx_desc;
1599 int sent_desc;
1600 u32 timeout = 0;
1601
1602 /* Get a descriptor for the first part of the packet */
1603 tx_desc = mvneta_txq_next_desc_get(txq);
1604
Stefan Roese3cbc11d2016-05-19 18:09:17 +02001605 tx_desc->buf_phys_addr = (u32)(uintptr_t)packet;
Stefan Roesee3b9c982015-11-19 07:46:15 +01001606 tx_desc->data_size = length;
Stefan Roese3cbc11d2016-05-19 18:09:17 +02001607 flush_dcache_range((ulong)packet,
1608 (ulong)packet + ALIGN(length, PKTALIGN));
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001609
1610 /* First and Last descriptor */
1611 tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC;
1612 mvneta_txq_pend_desc_add(pp, txq, 1);
1613
1614 /* Wait for packet to be sent (queue might help with speed here) */
1615 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1616 while (!sent_desc) {
1617 if (timeout++ > 10000) {
1618 printf("timeout: packet not sent\n");
1619 return -1;
1620 }
1621 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1622 }
1623
1624 /* txDone has increased - hw sent packet */
1625 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001626
1627 return 0;
1628}
1629
Stefan Roesee3b9c982015-11-19 07:46:15 +01001630static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001631{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001632 struct mvneta_port *pp = dev_get_priv(dev);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001633 int rx_done;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001634 struct mvneta_rx_queue *rxq;
Stefan Roesee3b9c982015-11-19 07:46:15 +01001635 int rx_bytes = 0;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001636
1637 /* get rx queue */
1638 rxq = mvneta_rxq_handle_get(pp, rxq_def);
1639 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001640
Stefan Roesee3b9c982015-11-19 07:46:15 +01001641 if (rx_done) {
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001642 struct mvneta_rx_desc *rx_desc;
1643 unsigned char *data;
1644 u32 rx_status;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001645
1646 /*
1647 * No cache invalidation needed here, since the desc's are
1648 * located in a uncached memory region
1649 */
1650 rx_desc = mvneta_rxq_next_desc_get(rxq);
1651
1652 rx_status = rx_desc->status;
1653 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1654 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1655 mvneta_rx_error(pp, rx_desc);
1656 /* leave the descriptor untouched */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001657 return -EIO;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001658 }
1659
1660 /* 2 bytes for marvell header. 4 bytes for crc */
1661 rx_bytes = rx_desc->data_size - 6;
1662
1663 /* give packet to stack - skip on first 2 bytes */
Stefan Roese3cbc11d2016-05-19 18:09:17 +02001664 data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001665 /*
1666 * No cache invalidation needed here, since the rx_buffer's are
1667 * located in a uncached memory region
1668 */
Stefan Roesee3b9c982015-11-19 07:46:15 +01001669 *packetp = data;
1670
Jason Brown32ac8b02017-11-28 11:12:43 -08001671 /*
1672 * Only mark one descriptor as free
1673 * since only one was processed
1674 */
1675 mvneta_rxq_desc_num_update(pp, rxq, 1, 1);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001676 }
1677
Stefan Roesee3b9c982015-11-19 07:46:15 +01001678 return rx_bytes;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001679}
1680
Stefan Roesee3b9c982015-11-19 07:46:15 +01001681static int mvneta_probe(struct udevice *dev)
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001682{
Stefan Roesee3b9c982015-11-19 07:46:15 +01001683 struct eth_pdata *pdata = dev_get_platdata(dev);
1684 struct mvneta_port *pp = dev_get_priv(dev);
1685 void *blob = (void *)gd->fdt_blob;
Simon Glasse160f7d2017-01-17 16:52:55 -07001686 int node = dev_of_offset(dev);
Stefan Roesee3b9c982015-11-19 07:46:15 +01001687 struct mii_dev *bus;
1688 unsigned long addr;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001689 void *bd_space;
Konstantin Porotchkin976feda2017-02-16 13:52:27 +02001690 int ret;
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001691 int fl_node;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001692
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001693 /*
 1694 * Allocate buffer area for descs and rx_buffers. This is only
 1695 * done once for all interfaces, as only one interface can
Chris Packham6723b232016-08-29 20:54:02 +12001696 * be active at a time. Make this area DMA-safe by disabling the D-cache.
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001697 */
1698 if (!buffer_loc.tx_descs) {
Jon Nettleton199b27b2018-05-30 08:52:29 +03001699 u32 size;
1700
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001701 /* Align buffer area for descs and rx_buffers to 1MiB */
1702 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
Rabeeh Khoury0f8888b2018-06-19 21:36:50 +03001703 flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE);
Stefan Roese3cbc11d2016-05-19 18:09:17 +02001704 mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001705 DCACHE_OFF);
1706 buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
Jon Nettleton199b27b2018-05-30 08:52:29 +03001707 size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
1708 ARCH_DMA_MINALIGN);
Rabeeh Khoury318b5d72018-06-19 21:36:51 +03001709 memset(buffer_loc.tx_descs, 0, size);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001710 buffer_loc.rx_descs = (struct mvneta_rx_desc *)
Jon Nettleton199b27b2018-05-30 08:52:29 +03001711 ((phys_addr_t)bd_space + size);
1712 size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
1713 ARCH_DMA_MINALIGN);
1714 buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001715 }
1716
Stefan Roesee3b9c982015-11-19 07:46:15 +01001717 pp->base = (void __iomem *)pdata->iobase;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001718
Stefan Roesee3b9c982015-11-19 07:46:15 +01001719 /* Configure MBUS address windows */
Simon Glass911f3ae2017-05-18 20:08:57 -06001720 if (device_is_compatible(dev, "marvell,armada-3700-neta"))
Stefan Roese544eefe2016-05-19 17:46:36 +02001721 mvneta_bypass_mbus_windows(pp);
1722 else
1723 mvneta_conf_mbus_windows(pp);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001724
Stefan Roesee3b9c982015-11-19 07:46:15 +01001725 /* PHY interface is already decoded in mvneta_ofdata_to_platdata() */
1726 pp->phy_interface = pdata->phy_interface;
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001727
Konstantin Porotchkin278d30c2017-02-16 13:52:28 +02001728 /* fetch 'fixed-link' property from 'neta' node */
1729 fl_node = fdt_subnode_offset(blob, node, "fixed-link");
1730 if (fl_node != -FDT_ERR_NOTFOUND) {
1731 /* set phy_addr to invalid value for fixed link */
1732 pp->phyaddr = PHY_MAX_ADDR + 1;
1733 pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
1734 pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
1735 } else {
1736 /* Now read phyaddr from DT */
1737 addr = fdtdec_get_int(blob, node, "phy", 0);
1738 addr = fdt_node_offset_by_phandle(blob, addr);
1739 pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
1740 }
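	/*
	 * Illustrative device-tree fragments matching the two cases above
	 * (node and label names are made up):
	 *
	 *   neta@30000 {
	 *           phy = <&phy0>;          // pp->phyaddr taken from phy0 "reg"
	 *   };
	 *
	 *   neta@40000 {
	 *           fixed-link {
	 *                   speed = <1000>;
	 *                   full-duplex;    // forced 1G full duplex, no PHY
	 *           };
	 *   };
	 */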
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001741
Stefan Roesee3b9c982015-11-19 07:46:15 +01001742 bus = mdio_alloc();
1743 if (!bus) {
1744 printf("Failed to allocate MDIO bus\n");
1745 return -ENOMEM;
1746 }
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001747
Stefan Roesee3b9c982015-11-19 07:46:15 +01001748 bus->read = mvneta_mdio_read;
1749 bus->write = mvneta_mdio_write;
 1750 snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
1751 bus->priv = (void *)pp;
1752 pp->bus = bus;
1753
Konstantin Porotchkin976feda2017-02-16 13:52:27 +02001754 ret = mdio_register(bus);
1755 if (ret)
1756 return ret;
1757
Simon Glassbcee8d62019-12-06 21:41:35 -07001758#if CONFIG_IS_ENABLED(DM_GPIO)
Aditya Prayoga18bfc8f2018-12-05 00:39:23 +08001759 gpio_request_by_name(dev, "phy-reset-gpios", 0,
1760 &pp->phy_reset_gpio, GPIOD_IS_OUT);
1761
1762 if (dm_gpio_is_valid(&pp->phy_reset_gpio)) {
1763 dm_gpio_set_value(&pp->phy_reset_gpio, 1);
1764 mdelay(10);
1765 dm_gpio_set_value(&pp->phy_reset_gpio, 0);
1766 }
1767#endif
1768
Konstantin Porotchkin976feda2017-02-16 13:52:27 +02001769 return board_network_enable(bus);
Stefan Roese19fc2ea2014-10-22 12:13:14 +02001770}
Stefan Roesee3b9c982015-11-19 07:46:15 +01001771
1772static void mvneta_stop(struct udevice *dev)
1773{
1774 struct mvneta_port *pp = dev_get_priv(dev);
1775
1776 mvneta_port_down(pp);
1777 mvneta_port_disable(pp);
1778}
1779
1780static const struct eth_ops mvneta_ops = {
1781 .start = mvneta_start,
1782 .send = mvneta_send,
1783 .recv = mvneta_recv,
1784 .stop = mvneta_stop,
Matt Pelland0a85f022018-03-27 13:18:25 -04001785 .write_hwaddr = mvneta_write_hwaddr,
Stefan Roesee3b9c982015-11-19 07:46:15 +01001786};
1787
1788static int mvneta_ofdata_to_platdata(struct udevice *dev)
1789{
1790 struct eth_pdata *pdata = dev_get_platdata(dev);
1791 const char *phy_mode;
1792
Simon Glassa821c4a2017-05-17 17:18:05 -06001793 pdata->iobase = devfdt_get_addr(dev);
Stefan Roesee3b9c982015-11-19 07:46:15 +01001794
1795 /* Get phy-mode / phy_interface from DT */
1796 pdata->phy_interface = -1;
Simon Glasse160f7d2017-01-17 16:52:55 -07001797 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
1798 NULL);
Stefan Roesee3b9c982015-11-19 07:46:15 +01001799 if (phy_mode)
1800 pdata->phy_interface = phy_get_interface_by_name(phy_mode);
1801 if (pdata->phy_interface == -1) {
1802 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
1803 return -EINVAL;
1804 }
1805
1806 return 0;
1807}
1808
1809static const struct udevice_id mvneta_ids[] = {
1810 { .compatible = "marvell,armada-370-neta" },
1811 { .compatible = "marvell,armada-xp-neta" },
Stefan Roese544eefe2016-05-19 17:46:36 +02001812 { .compatible = "marvell,armada-3700-neta" },
Stefan Roesee3b9c982015-11-19 07:46:15 +01001813 { }
1814};
1815
1816U_BOOT_DRIVER(mvneta) = {
1817 .name = "mvneta",
1818 .id = UCLASS_ETH,
1819 .of_match = mvneta_ids,
1820 .ofdata_to_platdata = mvneta_ofdata_to_platdata,
1821 .probe = mvneta_probe,
1822 .ops = &mvneta_ops,
1823 .priv_auto_alloc_size = sizeof(struct mvneta_port),
1824 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
1825};