1/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * U-Boot version:
9 * Copyright (C) 2016 Stefan Roese <sr@denx.de>
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <common.h>
17#include <dm.h>
18#include <dm/device-internal.h>
19#include <dm/lists.h>
20#include <net.h>
21#include <netdev.h>
22#include <config.h>
23#include <malloc.h>
24#include <asm/io.h>
25#include <linux/errno.h>
26#include <phy.h>
27#include <miiphy.h>
28#include <watchdog.h>
29#include <asm/arch/cpu.h>
30#include <asm/arch/soc.h>
31#include <linux/compat.h>
32#include <linux/mbus.h>
33
34DECLARE_GLOBAL_DATA_PTR;
35
36/* Some linux -> U-Boot compatibility stuff */
37#define netdev_err(dev, fmt, args...) \
38 printf(fmt, ##args)
39#define netdev_warn(dev, fmt, args...) \
40 printf(fmt, ##args)
41#define netdev_info(dev, fmt, args...) \
42 printf(fmt, ##args)
43#define netdev_dbg(dev, fmt, args...) \
44 printf(fmt, ##args)
45
46#define ETH_ALEN 6 /* Octets in one ethernet addr */
47
48#define __verify_pcpu_ptr(ptr) \
49do { \
50 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
51 (void)__vpp_verify; \
52} while (0)
53
54#define VERIFY_PERCPU_PTR(__p) \
55({ \
56 __verify_pcpu_ptr(__p); \
57 (typeof(*(__p)) __kernel __force *)(__p); \
58})
59
60#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
61#define smp_processor_id() 0
62#define num_present_cpus() 1
63#define for_each_present_cpu(cpu) \
64 for ((cpu) = 0; (cpu) < 1; (cpu)++)
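/*
 * U-Boot runs this driver on a single CPU, so the per-CPU compatibility
 * helpers above deliberately collapse to "CPU 0": per_cpu_ptr() just hands
 * back the pointer itself and for_each_present_cpu() iterates exactly once.
 */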
65
66#define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)
67
68#define CONFIG_NR_CPUS 1
69#define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */
70
71/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
72#define WRAP (2 + ETH_HLEN + 4 + 32)
73#define MTU 1500
74#define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
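/*
 * Worked example with the values above: WRAP = 2 + 14 + 4 + 32 = 52 bytes,
 * so RX_BUFFER_SIZE is ALIGN(1500 + 52, ARCH_DMA_MINALIGN), i.e. 1552
 * rounded up to the platform DMA alignment.
 */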
75
76#define MVPP2_SMI_TIMEOUT 10000
77
78/* RX Fifo Registers */
79#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
80#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
81#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
82#define MVPP2_RX_FIFO_INIT_REG 0x64
83
84/* RX DMA Top Registers */
85#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
86#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
87#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
88#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
89#define MVPP2_POOL_BUF_SIZE_OFFSET 5
90#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
91#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
92#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
93#define MVPP2_RXQ_POOL_SHORT_OFFS 20
94#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
95#define MVPP2_RXQ_POOL_LONG_OFFS 24
96#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
97#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
98#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
99#define MVPP2_RXQ_DISABLE_MASK BIT(31)
100
101/* Parser Registers */
102#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
103#define MVPP2_PRS_PORT_LU_MAX 0xf
104#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
105#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
106#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
107#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
108#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
109#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
110#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
111#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
112#define MVPP2_PRS_TCAM_IDX_REG 0x1100
113#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
114#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
115#define MVPP2_PRS_SRAM_IDX_REG 0x1200
116#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
117#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
118#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
119
120/* Classifier Registers */
121#define MVPP2_CLS_MODE_REG 0x1800
122#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
123#define MVPP2_CLS_PORT_WAY_REG 0x1810
124#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
125#define MVPP2_CLS_LKP_INDEX_REG 0x1814
126#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
127#define MVPP2_CLS_LKP_TBL_REG 0x1818
128#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
129#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
130#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
131#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
132#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
133#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
134#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
135#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
136#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
137#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
138#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
139#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
140
141/* Descriptor Manager Top Registers */
142#define MVPP2_RXQ_NUM_REG 0x2040
143#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
144#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
145#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
146#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
147#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
148#define MVPP2_RXQ_NUM_NEW_OFFSET 16
149#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
150#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
151#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
152#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
153#define MVPP2_RXQ_THRESH_REG 0x204c
154#define MVPP2_OCCUPIED_THRESH_OFFSET 0
155#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
156#define MVPP2_RXQ_INDEX_REG 0x2050
157#define MVPP2_TXQ_NUM_REG 0x2080
158#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
159#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
160#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
161#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
162#define MVPP2_TXQ_THRESH_REG 0x2094
163#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
164#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
165#define MVPP2_TXQ_INDEX_REG 0x2098
166#define MVPP2_TXQ_PREF_BUF_REG 0x209c
167#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
168#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
169#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
170#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
171#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
172#define MVPP2_TXQ_PENDING_REG 0x20a0
173#define MVPP2_TXQ_PENDING_MASK 0x3fff
174#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
175#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
176#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
177#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
178#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
179#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
180#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
181#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
182#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
183#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
184#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
185#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
186#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
187#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
188#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
189#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
190
191/* MBUS bridge registers */
192#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
193#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
194#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
195#define MVPP2_BASE_ADDR_ENABLE 0x4060
196
197/* Interrupt Cause and Mask registers */
198#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
199#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
200#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
201#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
202#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
203#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
204#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
205#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
206#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
207#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
208#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
209#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
210#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
211#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
212#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
213#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
214#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
215#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
216#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
217#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
218
219/* Buffer Manager registers */
220#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
221#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
222#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
223#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
224#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
225#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
226#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
227#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
228#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
229#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
230#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
231#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
232#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
233#define MVPP2_BM_START_MASK BIT(0)
234#define MVPP2_BM_STOP_MASK BIT(1)
235#define MVPP2_BM_STATE_MASK BIT(4)
236#define MVPP2_BM_LOW_THRESH_OFFS 8
237#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
238#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
239 MVPP2_BM_LOW_THRESH_OFFS)
240#define MVPP2_BM_HIGH_THRESH_OFFS 16
241#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
242#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
243 MVPP2_BM_HIGH_THRESH_OFFS)
244#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
245#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
246#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
247#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
248#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
249#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
250#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
251#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
252#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
253#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
254#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
255#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
256#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
257#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
258#define MVPP2_BM_VIRT_RLS_REG 0x64c0
259#define MVPP2_BM_MC_RLS_REG 0x64c4
260#define MVPP2_BM_MC_ID_MASK 0xfff
261#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
262
263/* TX Scheduler registers */
264#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
265#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
266#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
267#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
268#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
269#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
270#define MVPP2_TXP_SCHED_MTU_REG 0x801c
271#define MVPP2_TXP_MTU_MAX 0x7FFFF
272#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
273#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
274#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
275#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
276#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
277#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
278#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
279#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
280#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
281#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
282#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
283#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
284#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
285#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
286
287/* TX general registers */
288#define MVPP2_TX_SNOOP_REG 0x8800
289#define MVPP2_TX_PORT_FLUSH_REG 0x8810
290#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
291
292/* LMS registers */
293#define MVPP2_SRC_ADDR_MIDDLE 0x24
294#define MVPP2_SRC_ADDR_HIGH 0x28
295#define MVPP2_PHY_AN_CFG0_REG 0x34
296#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
297#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
298#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
299
300/* Per-port registers */
301#define MVPP2_GMAC_CTRL_0_REG 0x0
302#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
303#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
304#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
305#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
306#define MVPP2_GMAC_CTRL_1_REG 0x4
307#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
308#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
309#define MVPP2_GMAC_PCS_LB_EN_BIT 6
310#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
311#define MVPP2_GMAC_SA_LOW_OFFS 7
312#define MVPP2_GMAC_CTRL_2_REG 0x8
313#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
314#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
315#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
316#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
317#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
318#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
319#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
320#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
321#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
322#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
323#define MVPP2_GMAC_FC_ADV_EN BIT(9)
324#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
325#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
326#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
327#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
328#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
329#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
330 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
331
332#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
333
334/* Descriptor ring Macros */
335#define MVPP2_QUEUE_NEXT_DESC(q, index) \
336 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
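/*
 * MVPP2_QUEUE_NEXT_DESC() advances a ring index with wrap-around: it yields
 * index + 1 until last_desc is reached and then starts over at 0, which is
 * how the next_desc_to_proc fields of the RX/TX queues are stepped.
 */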
337
338/* SMI: 0xc0054 -> offset 0x54 to lms_base */
339#define MVPP2_SMI 0x0054
340#define MVPP2_PHY_REG_MASK 0x1f
341/* SMI register fields */
342#define MVPP2_SMI_DATA_OFFS 0 /* Data */
343#define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS)
344#define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */
345#define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr */
346#define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */
347#define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS)
348#define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */
349#define MVPP2_SMI_BUSY (1 << 28) /* Busy */
350
351#define MVPP2_PHY_ADDR_MASK 0x1f
352#define MVPP2_PHY_REG_MASK 0x1f
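/*
 * For illustration, an SMI read command word is typically assembled from the
 * fields above roughly as
 *   cmd = MVPP2_SMI_OPCODE_READ |
 *         ((phy_addr & MVPP2_PHY_ADDR_MASK) << MVPP2_SMI_DEV_ADDR_OFFS) |
 *         ((reg & MVPP2_PHY_REG_MASK) << MVPP2_SMI_REG_ADDR_OFFS);
 * the data is then taken from MVPP2_SMI_DATA_MASK once MVPP2_SMI_BUSY clears
 * and MVPP2_SMI_READ_VALID is set.
 */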
353
354/* Various constants */
355
356/* Coalescing */
357#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
358#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
359#define MVPP2_RX_COAL_PKTS 32
360#define MVPP2_RX_COAL_USEC 100
361
362/* The two-byte Marvell header. It either contains a special value used
363 * by Marvell switches when a specific hardware mode is enabled (not
364 * supported by this driver) or is filled with zeroes automatically on
365 * the RX side. Since those two bytes sit at the front of the Ethernet
366 * header, they automatically align the IP header on a 4-byte
367 * boundary: the hardware skips those two bytes on its
368 * own.
369 */
370#define MVPP2_MH_SIZE 2
371#define MVPP2_ETH_TYPE_LEN 2
372#define MVPP2_PPPOE_HDR_SIZE 8
373#define MVPP2_VLAN_TAG_LEN 4
374
375/* Lbtd 802.3 type */
376#define MVPP2_IP_LBDT_TYPE 0xfffa
377
378#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
379#define MVPP2_TX_CSUM_MAX_SIZE 9800
380
381/* Timeout constants */
382#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
383#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
384
385#define MVPP2_TX_MTU_MAX 0x7ffff
386
387/* Maximum number of T-CONTs of PON port */
388#define MVPP2_MAX_TCONT 16
389
390/* Maximum number of supported ports */
391#define MVPP2_MAX_PORTS 4
392
393/* Maximum number of TXQs used by single port */
394#define MVPP2_MAX_TXQ 8
395
396/* Maximum number of RXQs used by single port */
397#define MVPP2_MAX_RXQ 8
398
399/* Default number of TXQs in use */
400#define MVPP2_DEFAULT_TXQ 1
401
402/* Default number of RXQs in use */
403#define MVPP2_DEFAULT_RXQ 1
404#define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */
405
406/* Total number of RXQs available to all ports */
407#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
408
409/* Max number of Rx descriptors */
410#define MVPP2_MAX_RXD 16
411
412/* Max number of Tx descriptors */
413#define MVPP2_MAX_TXD 16
414
415/* Amount of Tx descriptors that can be reserved at once by CPU */
416#define MVPP2_CPU_DESC_CHUNK 64
417
418/* Max number of Tx descriptors in each aggregated queue */
419#define MVPP2_AGGR_TXQ_SIZE 256
420
421/* Descriptor aligned size */
422#define MVPP2_DESC_ALIGNED_SIZE 32
423
424/* Descriptor alignment mask */
425#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
426
427/* RX FIFO constants */
428#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
429#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
430#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
431
432/* RX buffer constants */
433#define MVPP2_SKB_SHINFO_SIZE \
434 0
435
436#define MVPP2_RX_PKT_SIZE(mtu) \
437 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
438 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
439
440#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
441#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
442#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
443 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
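/*
 * In other words: MVPP2_RX_PKT_SIZE() rounds the MTU plus Marvell header,
 * VLAN tag, Ethernet header and FCS up to a cache-line multiple, and
 * MVPP2_RX_BUF_SIZE() adds NET_SKB_PAD bytes of headroom on top of that.
 */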
444
445#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
446
447/* IPv6 max L3 address size */
448#define MVPP2_MAX_L3_ADDR_SIZE 16
449
450/* Port flags */
451#define MVPP2_F_LOOPBACK BIT(0)
452
453/* Marvell tag types */
454enum mvpp2_tag_type {
455 MVPP2_TAG_TYPE_NONE = 0,
456 MVPP2_TAG_TYPE_MH = 1,
457 MVPP2_TAG_TYPE_DSA = 2,
458 MVPP2_TAG_TYPE_EDSA = 3,
459 MVPP2_TAG_TYPE_VLAN = 4,
460 MVPP2_TAG_TYPE_LAST = 5
461};
462
463/* Parser constants */
464#define MVPP2_PRS_TCAM_SRAM_SIZE 256
465#define MVPP2_PRS_TCAM_WORDS 6
466#define MVPP2_PRS_SRAM_WORDS 4
467#define MVPP2_PRS_FLOW_ID_SIZE 64
468#define MVPP2_PRS_FLOW_ID_MASK 0x3f
469#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
470#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
471#define MVPP2_PRS_IPV4_HEAD 0x40
472#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
473#define MVPP2_PRS_IPV4_MC 0xe0
474#define MVPP2_PRS_IPV4_MC_MASK 0xf0
475#define MVPP2_PRS_IPV4_BC_MASK 0xff
476#define MVPP2_PRS_IPV4_IHL 0x5
477#define MVPP2_PRS_IPV4_IHL_MASK 0xf
478#define MVPP2_PRS_IPV6_MC 0xff
479#define MVPP2_PRS_IPV6_MC_MASK 0xff
480#define MVPP2_PRS_IPV6_HOP_MASK 0xff
481#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
482#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
483#define MVPP2_PRS_DBL_VLANS_MAX 100
484
485/* Tcam structure:
486 * - lookup ID - 4 bits
487 * - port ID - 1 byte
488 * - additional information - 1 byte
489 * - header data - 8 bytes
490 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
491 */
492#define MVPP2_PRS_AI_BITS 8
493#define MVPP2_PRS_PORT_MASK 0xff
494#define MVPP2_PRS_LU_MASK 0xf
495#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
496 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
497#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
498 (((offs) * 2) - ((offs) % 2) + 2)
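/*
 * Worked example of the byte mapping above: header-data offsets 0, 1, 2, 3
 * land in TCAM bytes 0, 1, 4, 5 and their enable bits in bytes 2, 3, 6, 7,
 * i.e. every pair of data bytes is followed by its pair of enable bytes.
 */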
499#define MVPP2_PRS_TCAM_AI_BYTE 16
500#define MVPP2_PRS_TCAM_PORT_BYTE 17
501#define MVPP2_PRS_TCAM_LU_BYTE 20
502#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
503#define MVPP2_PRS_TCAM_INV_WORD 5
504/* Tcam entries ID */
505#define MVPP2_PE_DROP_ALL 0
506#define MVPP2_PE_FIRST_FREE_TID 1
507#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
508#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
509#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
510#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
511#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
512#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
513#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
514#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
515#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
516#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
517#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
518#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
519#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
520#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
521#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
522#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
523#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
524#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
525#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
526#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
527#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
528#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
529#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
530#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
531#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
532
533/* Sram structure
534 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
535 */
536#define MVPP2_PRS_SRAM_RI_OFFS 0
537#define MVPP2_PRS_SRAM_RI_WORD 0
538#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
539#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
540#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
541#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
542#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
543#define MVPP2_PRS_SRAM_UDF_OFFS 73
544#define MVPP2_PRS_SRAM_UDF_BITS 8
545#define MVPP2_PRS_SRAM_UDF_MASK 0xff
546#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
547#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
548#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
549#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
550#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
551#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
552#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
553#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
554#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
555#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
556#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
557#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
558#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
559#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
560#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
561#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
562#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
563#define MVPP2_PRS_SRAM_AI_OFFS 90
564#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
565#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
566#define MVPP2_PRS_SRAM_AI_MASK 0xff
567#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
568#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
569#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
570#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
571
572/* Sram result info bits assignment */
573#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
574#define MVPP2_PRS_RI_DSA_MASK 0x2
575#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
576#define MVPP2_PRS_RI_VLAN_NONE 0x0
577#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
578#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
579#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
580#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
581#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
582#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
583#define MVPP2_PRS_RI_L2_UCAST 0x0
584#define MVPP2_PRS_RI_L2_MCAST BIT(9)
585#define MVPP2_PRS_RI_L2_BCAST BIT(10)
586#define MVPP2_PRS_RI_PPPOE_MASK 0x800
587#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
588#define MVPP2_PRS_RI_L3_UN 0x0
589#define MVPP2_PRS_RI_L3_IP4 BIT(12)
590#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
591#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
592#define MVPP2_PRS_RI_L3_IP6 BIT(14)
593#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
594#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
595#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
596#define MVPP2_PRS_RI_L3_UCAST 0x0
597#define MVPP2_PRS_RI_L3_MCAST BIT(15)
598#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
599#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
600#define MVPP2_PRS_RI_UDF3_MASK 0x300000
601#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
602#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
603#define MVPP2_PRS_RI_L4_TCP BIT(22)
604#define MVPP2_PRS_RI_L4_UDP BIT(23)
605#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
606#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
607#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
608#define MVPP2_PRS_RI_DROP_MASK 0x80000000
609
610/* Sram additional info bits assignment */
611#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
612#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
613#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
614#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
615#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
616#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
617#define MVPP2_PRS_SINGLE_VLAN_AI 0
618#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
619
620/* DSA/EDSA type */
621#define MVPP2_PRS_TAGGED true
622#define MVPP2_PRS_UNTAGGED false
623#define MVPP2_PRS_EDSA true
624#define MVPP2_PRS_DSA false
625
626/* MAC entries, shadow udf */
627enum mvpp2_prs_udf {
628 MVPP2_PRS_UDF_MAC_DEF,
629 MVPP2_PRS_UDF_MAC_RANGE,
630 MVPP2_PRS_UDF_L2_DEF,
631 MVPP2_PRS_UDF_L2_DEF_COPY,
632 MVPP2_PRS_UDF_L2_USER,
633};
634
635/* Lookup ID */
636enum mvpp2_prs_lookup {
637 MVPP2_PRS_LU_MH,
638 MVPP2_PRS_LU_MAC,
639 MVPP2_PRS_LU_DSA,
640 MVPP2_PRS_LU_VLAN,
641 MVPP2_PRS_LU_L2,
642 MVPP2_PRS_LU_PPPOE,
643 MVPP2_PRS_LU_IP4,
644 MVPP2_PRS_LU_IP6,
645 MVPP2_PRS_LU_FLOWS,
646 MVPP2_PRS_LU_LAST,
647};
648
649/* L3 cast enum */
650enum mvpp2_prs_l3_cast {
651 MVPP2_PRS_L3_UNI_CAST,
652 MVPP2_PRS_L3_MULTI_CAST,
653 MVPP2_PRS_L3_BROAD_CAST
654};
655
656/* Classifier constants */
657#define MVPP2_CLS_FLOWS_TBL_SIZE 512
658#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
659#define MVPP2_CLS_LKP_TBL_SIZE 64
660
661/* BM constants */
662#define MVPP2_BM_POOLS_NUM 1
663#define MVPP2_BM_LONG_BUF_NUM 16
664#define MVPP2_BM_SHORT_BUF_NUM 16
665#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
666#define MVPP2_BM_POOL_PTR_ALIGN 128
667#define MVPP2_BM_SWF_LONG_POOL(port) 0
668
669/* BM cookie (32 bits) definition */
670#define MVPP2_BM_COOKIE_POOL_OFFS 8
671#define MVPP2_BM_COOKIE_CPU_OFFS 24
672
673/* BM short pool packet size
674 * These values ensure that for SWF the total number
675 * of bytes allocated for each buffer will be 512
676 */
677#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
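/*
 * With NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0 this evaluates to
 * MVPP2_RX_MAX_PKT_SIZE(512) = 512 - 32 - 0 = 480 bytes of packet data per
 * 512-byte short-pool buffer.
 */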
678
679enum mvpp2_bm_type {
680 MVPP2_BM_FREE,
681 MVPP2_BM_SWF_LONG,
682 MVPP2_BM_SWF_SHORT
683};
684
685/* Definitions */
686
687/* Shared Packet Processor resources */
688struct mvpp2 {
689 /* Shared registers' base addresses */
690 void __iomem *base;
691 void __iomem *lms_base;
692
693 /* List of pointers to port structures */
694 struct mvpp2_port **port_list;
695
696 /* Aggregated TXQs */
697 struct mvpp2_tx_queue *aggr_txqs;
698
699 /* BM pools */
700 struct mvpp2_bm_pool *bm_pools;
701
702 /* PRS shadow table */
703 struct mvpp2_prs_shadow *prs_shadow;
704 /* PRS auxiliary table for double vlan entries control */
705 bool *prs_double_vlans;
706
707 /* Tclk value */
708 u32 tclk;
709
710 /* HW version */
711 enum { MVPP21, MVPP22 } hw_version;
712
713 struct mii_dev *bus;
714};
715
716struct mvpp2_pcpu_stats {
717 u64 rx_packets;
718 u64 rx_bytes;
719 u64 tx_packets;
720 u64 tx_bytes;
721};
722
723struct mvpp2_port {
724 u8 id;
725
726 int irq;
727
728 struct mvpp2 *priv;
729
730 /* Per-port registers' base address */
731 void __iomem *base;
732
733 struct mvpp2_rx_queue **rxqs;
734 struct mvpp2_tx_queue **txqs;
735
736 int pkt_size;
737
738 u32 pending_cause_rx;
739
740 /* Per-CPU port control */
741 struct mvpp2_port_pcpu __percpu *pcpu;
742
743 /* Flags */
744 unsigned long flags;
745
746 u16 tx_ring_size;
747 u16 rx_ring_size;
748 struct mvpp2_pcpu_stats __percpu *stats;
749
750 struct phy_device *phy_dev;
751 phy_interface_t phy_interface;
752 int phy_node;
753 int phyaddr;
754 int init;
755 unsigned int link;
756 unsigned int duplex;
757 unsigned int speed;
758
759 struct mvpp2_bm_pool *pool_long;
760 struct mvpp2_bm_pool *pool_short;
761
762 /* Index of first port's physical RXQ */
763 u8 first_rxq;
764
765 u8 dev_addr[ETH_ALEN];
766};
767
768/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
769 * layout of the transmit and receive DMA descriptors; their
770 * layout is therefore defined by the hardware design.
771 */
772
773#define MVPP2_TXD_L3_OFF_SHIFT 0
774#define MVPP2_TXD_IP_HLEN_SHIFT 8
775#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
776#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
777#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
778#define MVPP2_TXD_PADDING_DISABLE BIT(23)
779#define MVPP2_TXD_L4_UDP BIT(24)
780#define MVPP2_TXD_L3_IP6 BIT(26)
781#define MVPP2_TXD_L_DESC BIT(28)
782#define MVPP2_TXD_F_DESC BIT(29)
783
784#define MVPP2_RXD_ERR_SUMMARY BIT(15)
785#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
786#define MVPP2_RXD_ERR_CRC 0x0
787#define MVPP2_RXD_ERR_OVERRUN BIT(13)
788#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
789#define MVPP2_RXD_BM_POOL_ID_OFFS 16
790#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
791#define MVPP2_RXD_HWF_SYNC BIT(21)
792#define MVPP2_RXD_L4_CSUM_OK BIT(22)
793#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
794#define MVPP2_RXD_L4_TCP BIT(25)
795#define MVPP2_RXD_L4_UDP BIT(26)
796#define MVPP2_RXD_L3_IP4 BIT(28)
797#define MVPP2_RXD_L3_IP6 BIT(30)
798#define MVPP2_RXD_BUF_HDR BIT(31)
799
800struct mvpp2_tx_desc {
801 u32 command; /* Options used by HW for packet transmitting.*/
802 u8 packet_offset; /* the offset from the buffer beginning */
803 u8 phys_txq; /* destination queue ID */
804 u16 data_size; /* data size of transmitted packet in bytes */
805 u32 buf_dma_addr; /* physical addr of transmitted buffer */
806 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
807 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
808 u32 reserved2; /* reserved (for future use) */
809};
810
811struct mvpp2_rx_desc {
812 u32 status; /* info about received packet */
813 u16 reserved1; /* parser_info (for future use, PnC) */
814 u16 data_size; /* size of received packet in bytes */
815 u32 buf_dma_addr; /* physical address of the buffer */
816 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
817 u16 reserved2; /* gem_port_id (for future use, PON) */
818 u16 reserved3; /* csum_l4 (for future use, PnC) */
819 u8 reserved4; /* bm_qset (for future use, BM) */
820 u8 reserved5;
821 u16 reserved6; /* classify_info (for future use, PnC) */
822 u32 reserved7; /* flow_id (for future use, PnC) */
823 u32 reserved8;
824};
825
826/* Per-CPU Tx queue control */
827struct mvpp2_txq_pcpu {
828 int cpu;
829
830 /* Number of Tx DMA descriptors in the descriptor ring */
831 int size;
832
833 /* Number of currently used Tx DMA descriptors in the
834 * descriptor ring
835 */
836 int count;
837
838 /* Number of Tx DMA descriptors reserved for each CPU */
839 int reserved_num;
840
841 /* Index of last TX DMA descriptor that was inserted */
842 int txq_put_index;
843
844 /* Index of the TX DMA descriptor to be cleaned up */
845 int txq_get_index;
846};
847
848struct mvpp2_tx_queue {
849 /* Physical number of this Tx queue */
850 u8 id;
851
852 /* Logical number of this Tx queue */
853 u8 log_id;
854
855 /* Number of Tx DMA descriptors in the descriptor ring */
856 int size;
857
858 /* Number of currently used Tx DMA descriptors in the descriptor ring */
859 int count;
860
861 /* Per-CPU control of physical Tx queues */
862 struct mvpp2_txq_pcpu __percpu *pcpu;
863
864 u32 done_pkts_coal;
865
866 /* Virtual address of the Tx DMA descriptors array */
867 struct mvpp2_tx_desc *descs;
868
869 /* DMA address of the Tx DMA descriptors array */
870 dma_addr_t descs_dma;
871
872 /* Index of the last Tx DMA descriptor */
873 int last_desc;
874
875 /* Index of the next Tx DMA descriptor to process */
876 int next_desc_to_proc;
877};
878
879struct mvpp2_rx_queue {
880 /* RX queue number, in the range 0-31 for physical RXQs */
881 u8 id;
882
883 /* Num of rx descriptors in the rx descriptor ring */
884 int size;
885
886 u32 pkts_coal;
887 u32 time_coal;
888
889 /* Virtual address of the RX DMA descriptors array */
890 struct mvpp2_rx_desc *descs;
891
892 /* DMA address of the RX DMA descriptors array */
893 dma_addr_t descs_dma;
894
895 /* Index of the last RX DMA descriptor */
896 int last_desc;
897
898 /* Index of the next RX DMA descriptor to process */
899 int next_desc_to_proc;
900
901 /* ID of port to which physical RXQ is mapped */
902 int port;
903
904 /* Port's logic RXQ number to which physical RXQ is mapped */
905 /* Port's logical RXQ number to which physical RXQ is mapped */
906};
907
908union mvpp2_prs_tcam_entry {
909 u32 word[MVPP2_PRS_TCAM_WORDS];
910 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
911};
912
913union mvpp2_prs_sram_entry {
914 u32 word[MVPP2_PRS_SRAM_WORDS];
915 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
916};
917
918struct mvpp2_prs_entry {
919 u32 index;
920 union mvpp2_prs_tcam_entry tcam;
921 union mvpp2_prs_sram_entry sram;
922};
923
924struct mvpp2_prs_shadow {
925 bool valid;
926 bool finish;
927
928 /* Lookup ID */
929 int lu;
930
931 /* User defined offset */
932 int udf;
933
934 /* Result info */
935 u32 ri;
936 u32 ri_mask;
937};
938
939struct mvpp2_cls_flow_entry {
940 u32 index;
941 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
942};
943
944struct mvpp2_cls_lookup_entry {
945 u32 lkpid;
946 u32 way;
947 u32 data;
948};
949
950struct mvpp2_bm_pool {
951 /* Pool number in the range 0-7 */
952 int id;
953 enum mvpp2_bm_type type;
954
955 /* Buffer Pointers Pool External (BPPE) size */
956 int size;
957 /* Number of buffers for this pool */
958 int buf_num;
959 /* Pool buffer size */
960 int buf_size;
961 /* Packet size */
962 int pkt_size;
963
964 /* BPPE virtual base address */
965 unsigned long *virt_addr;
966 /* BPPE DMA base address */
967 dma_addr_t dma_addr;
968
969 /* Ports using BM pool */
970 u32 port_map;
971
972 /* Occupied buffers indicator */
973 int in_use_thresh;
974};
975
976/* Static declarations */
977
978/* Number of RXQs used by single port */
979static int rxq_number = MVPP2_DEFAULT_RXQ;
980/* Number of TXQs used by single port */
981static int txq_number = MVPP2_DEFAULT_TXQ;
982
983#define MVPP2_DRIVER_NAME "mvpp2"
984#define MVPP2_DRIVER_VERSION "1.0"
985
986/*
987 * U-Boot internal data, mostly uncached buffers for descriptors and data
988 */
989struct buffer_location {
990 struct mvpp2_tx_desc *aggr_tx_descs;
991 struct mvpp2_tx_desc *tx_descs;
992 struct mvpp2_rx_desc *rx_descs;
993 unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
994 unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
995 int first_rxq;
996};
997
998/*
999 * All 4 interfaces use the same global buffer, since only one interface
1000 * can be enabled at once
1001 */
1002static struct buffer_location buffer_loc;
1003
1004/*
1005 * Page table entries are set to 1MB, or multiples of 1MB
1006 * (not < 1MB). The driver uses fewer BDs, so 1 MB of BD space is enough.
1007 */
1008#define BD_SPACE (1 << 20)
1009
1010/* Utility/helper methods */
1011
1012static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1013{
1014 writel(data, priv->base + offset);
1015}
1016
1017static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1018{
1019 return readl(priv->base + offset);
1020}
1021
1022static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1023 struct mvpp2_tx_desc *tx_desc,
1024 dma_addr_t dma_addr)
1025{
1026 tx_desc->buf_dma_addr = dma_addr;
1027}
1028
1029static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1030 struct mvpp2_tx_desc *tx_desc,
1031 size_t size)
1032{
1033 tx_desc->data_size = size;
1034}
1035
1036static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1037 struct mvpp2_tx_desc *tx_desc,
1038 unsigned int txq)
1039{
1040 tx_desc->phys_txq = txq;
1041}
1042
1043static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1044 struct mvpp2_tx_desc *tx_desc,
1045 unsigned int command)
1046{
1047 tx_desc->command = command;
1048}
1049
1050static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1051 struct mvpp2_tx_desc *tx_desc,
1052 unsigned int offset)
1053{
1054 tx_desc->packet_offset = offset;
1055}
1056
1057static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1058 struct mvpp2_rx_desc *rx_desc)
1059{
1060 return rx_desc->buf_dma_addr;
1061}
1062
1063static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1064 struct mvpp2_rx_desc *rx_desc)
1065{
1066 return rx_desc->buf_cookie;
1067}
1068
1069static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1070 struct mvpp2_rx_desc *rx_desc)
1071{
1072 return rx_desc->data_size;
1073}
1074
1075static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1076 struct mvpp2_rx_desc *rx_desc)
1077{
1078 return rx_desc->status;
1079}
1080
1081static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1082{
1083 txq_pcpu->txq_get_index++;
1084 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1085 txq_pcpu->txq_get_index = 0;
1086}
1087
1088/* Get number of physical egress port */
1089static inline int mvpp2_egress_port(struct mvpp2_port *port)
1090{
1091 return MVPP2_MAX_TCONT + port->id;
1092}
1093
1094/* Get number of physical TXQ */
1095static inline int mvpp2_txq_phys(int port, int txq)
1096{
1097 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1098}
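/*
 * Example of the numbering used above: with MVPP2_MAX_TCONT = 16 and
 * MVPP2_MAX_TXQ = 8, the egress "port" number of port 0 is 16 and its
 * TXQ 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128.
 */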
1099
1100/* Parser configuration routines */
1101
1102/* Update parser tcam and sram hw entries */
1103static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1104{
1105 int i;
1106
1107 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1108 return -EINVAL;
1109
1110 /* Clear entry invalidation bit */
1111 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1112
1113 /* Write tcam index - indirect access */
1114 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1115 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1116 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1117
1118 /* Write sram index - indirect access */
1119 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1120 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1121 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1122
1123 return 0;
1124}
1125
1126/* Read tcam entry from hw */
1127static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1128{
1129 int i;
1130
1131 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1132 return -EINVAL;
1133
1134 /* Write tcam index - indirect access */
1135 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1136
1137 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1138 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1139 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1140 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1141
1142 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1143 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1144
1145 /* Write sram index - indirect access */
1146 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1147 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1148 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1149
1150 return 0;
1151}
1152
1153/* Invalidate tcam hw entry */
1154static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1155{
1156 /* Write index - indirect access */
1157 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1158 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1159 MVPP2_PRS_TCAM_INV_MASK);
1160}
1161
1162/* Enable shadow table entry and set its lookup ID */
1163static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1164{
1165 priv->prs_shadow[index].valid = true;
1166 priv->prs_shadow[index].lu = lu;
1167}
1168
1169/* Update ri fields in shadow table entry */
1170static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1171 unsigned int ri, unsigned int ri_mask)
1172{
1173 priv->prs_shadow[index].ri_mask = ri_mask;
1174 priv->prs_shadow[index].ri = ri;
1175}
1176
1177/* Update lookup field in tcam sw entry */
1178static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1179{
1180 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1181
1182 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1183 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1184}
1185
1186/* Update mask for single port in tcam sw entry */
1187static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1188 unsigned int port, bool add)
1189{
1190 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1191
1192 if (add)
1193 pe->tcam.byte[enable_off] &= ~(1 << port);
1194 else
1195 pe->tcam.byte[enable_off] |= 1 << port;
1196}
1197
1198/* Update port map in tcam sw entry */
1199static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1200 unsigned int ports)
1201{
1202 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1203 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1204
1205 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1206 pe->tcam.byte[enable_off] &= ~port_mask;
1207 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1208}
1209
1210/* Obtain port map from tcam sw entry */
1211static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1212{
1213 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1214
1215 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1216}
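/*
 * Note on the two helpers above: the TCAM port byte uses inverted
 * (active-low) enable bits, which is why the map is written as ~ports and
 * read back as ~byte - a 0 bit in the enable byte means the port matches.
 */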
1217
1218/* Set byte of data and its enable bits in tcam sw entry */
1219static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1220 unsigned int offs, unsigned char byte,
1221 unsigned char enable)
1222{
1223 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1224 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1225}
1226
1227/* Get byte of data and its enable bits from tcam sw entry */
1228static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1229 unsigned int offs, unsigned char *byte,
1230 unsigned char *enable)
1231{
1232 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1233 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1234}
1235
1236/* Set ethertype in tcam sw entry */
1237static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1238 unsigned short ethertype)
1239{
1240 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1241 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1242}
1243
1244/* Set bits in sram sw entry */
1245static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1246 int val)
1247{
1248 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1249}
1250
1251/* Clear bits in sram sw entry */
1252static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1253 int val)
1254{
1255 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1256}
1257
1258/* Update ri bits in sram sw entry */
1259static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1260 unsigned int bits, unsigned int mask)
1261{
1262 unsigned int i;
1263
1264 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1265 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1266
1267 if (!(mask & BIT(i)))
1268 continue;
1269
1270 if (bits & BIT(i))
1271 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1272 else
1273 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1274
1275 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1276 }
1277}
1278
1279/* Update ai bits in sram sw entry */
1280static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1281 unsigned int bits, unsigned int mask)
1282{
1283 unsigned int i;
1284 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1285
1286 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1287
1288 if (!(mask & BIT(i)))
1289 continue;
1290
1291 if (bits & BIT(i))
1292 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1293 else
1294 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1295
1296 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1297 }
1298}
1299
1300/* Read ai bits from sram sw entry */
1301static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1302{
1303 u8 bits;
1304 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1305 int ai_en_off = ai_off + 1;
1306 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1307
1308 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1309 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1310
1311 return bits;
1312}
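/*
 * The AI field starts at SRAM bit 90, which is not byte aligned, so the
 * helper above stitches its 8 bits together from two adjacent bytes
 * (byte 11 shifted right by 2, ORed with byte 12 shifted left by 6).
 */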
1313
1314/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1315 * lookup iteration
1316 */
1317static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1318 unsigned int lu)
1319{
1320 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1321
1322 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1323 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1324 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1325}
1326
1327/* In the sram sw entry set sign and value of the next lookup offset
1328 * and the offset value generated to the classifier
1329 */
1330static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1331 unsigned int op)
1332{
1333 /* Set sign */
1334 if (shift < 0) {
1335 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1336 shift = 0 - shift;
1337 } else {
1338 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1339 }
1340
1341 /* Set value */
1342 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1343 (unsigned char)shift;
1344
1345 /* Reset and set operation */
1346 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1347 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1348 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1349
1350 /* Set base offset as current */
1351 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1352}
1353
1354/* In the sram sw entry set sign and value of the user defined offset
1355 * generated to the classifier
1356 */
1357static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1358 unsigned int type, int offset,
1359 unsigned int op)
1360{
1361 /* Set sign */
1362 if (offset < 0) {
1363 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1364 offset = 0 - offset;
1365 } else {
1366 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1367 }
1368
1369 /* Set value */
1370 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1371 MVPP2_PRS_SRAM_UDF_MASK);
1372 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1373 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1374 MVPP2_PRS_SRAM_UDF_BITS)] &=
1375 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1376 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1377 MVPP2_PRS_SRAM_UDF_BITS)] |=
1378 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1379
1380 /* Set offset type */
1381 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1382 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1383 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1384
1385 /* Set offset operation */
1386 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1387 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1388 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1389
1390 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1391 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1392 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1393 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1394
1395 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1396 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1397 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1398
1399 /* Set base offset as current */
1400 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1401}
1402
1403/* Find parser flow entry */
1404static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1405{
1406 struct mvpp2_prs_entry *pe;
1407 int tid;
1408
1409 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1410 if (!pe)
1411 return NULL;
1412 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1413
1414 /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1415 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1416 u8 bits;
1417
1418 if (!priv->prs_shadow[tid].valid ||
1419 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1420 continue;
1421
1422 pe->index = tid;
1423 mvpp2_prs_hw_read(priv, pe);
1424 bits = mvpp2_prs_sram_ai_get(pe);
1425
1426 /* The SRAM stores the classification lookup ID in AI bits [5:0] */
1427 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1428 return pe;
1429 }
1430 kfree(pe);
1431
1432 return NULL;
1433}
1434
1435/* Return first free tcam index, seeking from start to end */
1436static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1437 unsigned char end)
1438{
1439 int tid;
1440
1441 if (start > end)
1442 swap(start, end);
1443
1444 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1445 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1446
1447 for (tid = start; tid <= end; tid++) {
1448 if (!priv->prs_shadow[tid].valid)
1449 return tid;
1450 }
1451
1452 return -EINVAL;
1453}
1454
1455/* Enable/disable dropping of all MAC DAs */
1456static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1457{
1458 struct mvpp2_prs_entry pe;
1459
1460 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1461 /* Entry exists - update port only */
1462 pe.index = MVPP2_PE_DROP_ALL;
1463 mvpp2_prs_hw_read(priv, &pe);
1464 } else {
1465 /* Entry doesn't exist - create new */
1466 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1467 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1468 pe.index = MVPP2_PE_DROP_ALL;
1469
1470 /* Non-promiscuous mode for all ports - DROP unknown packets */
1471 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1472 MVPP2_PRS_RI_DROP_MASK);
1473
1474 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1475 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1476
1477 /* Update shadow table */
1478 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1479
1480 /* Mask all ports */
1481 mvpp2_prs_tcam_port_map_set(&pe, 0);
1482 }
1483
1484 /* Update port mask */
1485 mvpp2_prs_tcam_port_set(&pe, port, add);
1486
1487 mvpp2_prs_hw_write(priv, &pe);
1488}
1489
1490/* Set port to promiscuous mode */
1491static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1492{
1493 struct mvpp2_prs_entry pe;
1494
1495 /* Promiscuous mode - Accept unknown packets */
1496
1497 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1498 /* Entry exists - update port only */
1499 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1500 mvpp2_prs_hw_read(priv, &pe);
1501 } else {
1502 /* Entry doesn't exist - create new */
1503 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1504 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1505 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1506
1507 /* Continue - set next lookup */
1508 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1509
1510 /* Set result info bits */
1511 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1512 MVPP2_PRS_RI_L2_CAST_MASK);
1513
1514 /* Shift to ethertype */
1515 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1516 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1517
1518 /* Mask all ports */
1519 mvpp2_prs_tcam_port_map_set(&pe, 0);
1520
1521 /* Update shadow table */
1522 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1523 }
1524
1525 /* Update port mask */
1526 mvpp2_prs_tcam_port_set(&pe, port, add);
1527
1528 mvpp2_prs_hw_write(priv, &pe);
1529}
1530
1531/* Accept multicast */
1532static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1533 bool add)
1534{
1535 struct mvpp2_prs_entry pe;
1536 unsigned char da_mc;
1537
1538 /* Ethernet multicast address first byte is
1539 * 0x01 for IPv4 and 0x33 for IPv6
1540 */
1541 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1542
1543 if (priv->prs_shadow[index].valid) {
1544 /* Entry exists - update port only */
1545 pe.index = index;
1546 mvpp2_prs_hw_read(priv, &pe);
1547 } else {
1548 /* Entry doesn't exist - create new */
1549 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1550 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1551 pe.index = index;
1552
1553 /* Continue - set next lookup */
1554 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1555
1556 /* Set result info bits */
1557 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1558 MVPP2_PRS_RI_L2_CAST_MASK);
1559
1560 /* Update tcam entry data first byte */
1561 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1562
1563 /* Shift to ethertype */
1564 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1565 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1566
1567 /* Mask all ports */
1568 mvpp2_prs_tcam_port_map_set(&pe, 0);
1569
1570 /* Update shadow table */
1571 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1572 }
1573
1574 /* Update port mask */
1575 mvpp2_prs_tcam_port_set(&pe, port, add);
1576
1577 mvpp2_prs_hw_write(priv, &pe);
1578}
1579
1580/* Parser per-port initialization */
1581static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1582 int lu_max, int offset)
1583{
1584 u32 val;
1585
1586 /* Set lookup ID */
1587 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1588 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1589 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1590 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1591
1592 /* Set maximum number of loops for packet received from port */
1593 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1594 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1595 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1596 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1597
1598 /* Set initial offset for packet header extraction for the first
1599 * searching loop
1600 */
1601 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1602 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1603 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1604 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1605}
1606
1607/* Default flow entries initialization for all ports */
1608static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1609{
1610 struct mvpp2_prs_entry pe;
1611 int port;
1612
1613 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1614 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1615 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1616 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1617
1618 /* Mask all ports */
1619 mvpp2_prs_tcam_port_map_set(&pe, 0);
1620
1621 /* Set flow ID */
1622 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1623 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1624
1625 /* Update shadow table and hw entry */
1626 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1627 mvpp2_prs_hw_write(priv, &pe);
1628 }
1629}
1630
1631/* Set default entry for Marvell Header field */
1632static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1633{
1634 struct mvpp2_prs_entry pe;
1635
1636 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1637
1638 pe.index = MVPP2_PE_MH_DEFAULT;
1639 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1640 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1641 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1642 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1643
1644 /* Unmask all ports */
1645 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1646
1647 /* Update shadow table and hw entry */
1648 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1649 mvpp2_prs_hw_write(priv, &pe);
1650}
1651
1652/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1653 * multicast MAC addresses
1654 */
1655static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1656{
1657 struct mvpp2_prs_entry pe;
1658
1659 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1660
1661 /* Non-promiscuous mode for all ports - DROP unknown packets */
1662 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1663 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1664
1665 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1666 MVPP2_PRS_RI_DROP_MASK);
1667 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1668 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1669
1670 /* Unmask all ports */
1671 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1672
1673 /* Update shadow table and hw entry */
1674 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1675 mvpp2_prs_hw_write(priv, &pe);
1676
1677 /* place holders only - no ports */
1678 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1679 mvpp2_prs_mac_promisc_set(priv, 0, false);
1680 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1681 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1682}
1683
1684/* Match basic ethertypes */
1685static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1686{
1687 struct mvpp2_prs_entry pe;
1688 int tid;
1689
1690 /* Ethertype: PPPoE */
1691 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1692 MVPP2_PE_LAST_FREE_TID);
1693 if (tid < 0)
1694 return tid;
1695
1696 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1697 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1698 pe.index = tid;
1699
1700 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1701
1702 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1703 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1704 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1705 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1706 MVPP2_PRS_RI_PPPOE_MASK);
1707
1708 /* Update shadow table and hw entry */
1709 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1710 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1711 priv->prs_shadow[pe.index].finish = false;
1712 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
1713 MVPP2_PRS_RI_PPPOE_MASK);
1714 mvpp2_prs_hw_write(priv, &pe);
1715
1716 /* Ethertype: ARP */
1717 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1718 MVPP2_PE_LAST_FREE_TID);
1719 if (tid < 0)
1720 return tid;
1721
1722 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1723 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1724 pe.index = tid;
1725
1726 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
1727
1728 /* Generate flow in the next iteration*/
1729 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1730 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1731 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
1732 MVPP2_PRS_RI_L3_PROTO_MASK);
1733 /* Set L3 offset */
1734 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1735 MVPP2_ETH_TYPE_LEN,
1736 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1737
1738 /* Update shadow table and hw entry */
1739 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1740 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1741 priv->prs_shadow[pe.index].finish = true;
1742 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
1743 MVPP2_PRS_RI_L3_PROTO_MASK);
1744 mvpp2_prs_hw_write(priv, &pe);
1745
1746 /* Ethertype: LBTD */
1747 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1748 MVPP2_PE_LAST_FREE_TID);
1749 if (tid < 0)
1750 return tid;
1751
1752 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1753 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1754 pe.index = tid;
1755
1756 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
1757
1758 /* Generate flow in the next iteration*/
1759 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1760 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1761 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1762 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1763 MVPP2_PRS_RI_CPU_CODE_MASK |
1764 MVPP2_PRS_RI_UDF3_MASK);
1765 /* Set L3 offset */
1766 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1767 MVPP2_ETH_TYPE_LEN,
1768 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1769
1770 /* Update shadow table and hw entry */
1771 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1772 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1773 priv->prs_shadow[pe.index].finish = true;
1774 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1775 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1776 MVPP2_PRS_RI_CPU_CODE_MASK |
1777 MVPP2_PRS_RI_UDF3_MASK);
1778 mvpp2_prs_hw_write(priv, &pe);
1779
1780 /* Ethertype: IPv4 without options */
1781 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1782 MVPP2_PE_LAST_FREE_TID);
1783 if (tid < 0)
1784 return tid;
1785
1786 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1787 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1788 pe.index = tid;
1789
1790 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
1791 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1792 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1793 MVPP2_PRS_IPV4_HEAD_MASK |
1794 MVPP2_PRS_IPV4_IHL_MASK);
1795
1796 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1797 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1798 MVPP2_PRS_RI_L3_PROTO_MASK);
1799 /* Skip eth_type + 4 bytes of IP header */
1800 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1801 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1802 /* Set L3 offset */
1803 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1804 MVPP2_ETH_TYPE_LEN,
1805 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1806
1807 /* Update shadow table and hw entry */
1808 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1809 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1810 priv->prs_shadow[pe.index].finish = false;
1811 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
1812 MVPP2_PRS_RI_L3_PROTO_MASK);
1813 mvpp2_prs_hw_write(priv, &pe);
1814
1815 /* Ethertype: IPv4 with options */
1816 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1817 MVPP2_PE_LAST_FREE_TID);
1818 if (tid < 0)
1819 return tid;
1820
1821 pe.index = tid;
1822
1823 /* Clear tcam data before updating */
1824 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
1825 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
1826
1827 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1828 MVPP2_PRS_IPV4_HEAD,
1829 MVPP2_PRS_IPV4_HEAD_MASK);
1830
1831 /* Clear ri before updating */
1832 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1833 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1834 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1835 MVPP2_PRS_RI_L3_PROTO_MASK);
1836
1837 /* Update shadow table and hw entry */
1838 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1839 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1840 priv->prs_shadow[pe.index].finish = false;
1841 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
1842 MVPP2_PRS_RI_L3_PROTO_MASK);
1843 mvpp2_prs_hw_write(priv, &pe);
1844
1845 /* Ethertype: IPv6 without options */
1846 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1847 MVPP2_PE_LAST_FREE_TID);
1848 if (tid < 0)
1849 return tid;
1850
1851 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1852 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1853 pe.index = tid;
1854
1855 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
1856
1857 /* Skip DIP of IPV6 header */
1858 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
1859 MVPP2_MAX_L3_ADDR_SIZE,
1860 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1861 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1862 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
1863 MVPP2_PRS_RI_L3_PROTO_MASK);
1864 /* Set L3 offset */
1865 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1866 MVPP2_ETH_TYPE_LEN,
1867 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1868
1869 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1870 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1871 priv->prs_shadow[pe.index].finish = false;
1872 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
1873 MVPP2_PRS_RI_L3_PROTO_MASK);
1874 mvpp2_prs_hw_write(priv, &pe);
1875
1876 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1877 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1878 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1879 pe.index = MVPP2_PE_ETH_TYPE_UN;
1880
1881 /* Unmask all ports */
1882 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1883
1884 /* Generate flow in the next iteration*/
1885 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1886 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1887 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
1888 MVPP2_PRS_RI_L3_PROTO_MASK);
1889	/* Set L3 offset even if the L3 protocol is unknown */
1890 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1891 MVPP2_ETH_TYPE_LEN,
1892 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1893
1894 /* Update shadow table and hw entry */
1895 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1896 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
1897 priv->prs_shadow[pe.index].finish = true;
1898 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
1899 MVPP2_PRS_RI_L3_PROTO_MASK);
1900 mvpp2_prs_hw_write(priv, &pe);
1901
1902 return 0;
1903}
1904
1905/* Parser default initialization */
1906static int mvpp2_prs_default_init(struct udevice *dev,
1907 struct mvpp2 *priv)
1908{
1909 int err, index, i;
1910
1911 /* Enable tcam table */
1912 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
1913
1914 /* Clear all tcam and sram entries */
1915 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
1916 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1917 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1918 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
1919
1920 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
1921 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1922 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
1923 }
1924
1925 /* Invalidate all tcam entries */
1926 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
1927 mvpp2_prs_hw_inv(priv, index);
1928
1929 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
1930 sizeof(struct mvpp2_prs_shadow),
1931 GFP_KERNEL);
1932 if (!priv->prs_shadow)
1933 return -ENOMEM;
1934
1935 /* Always start from lookup = 0 */
1936 for (index = 0; index < MVPP2_MAX_PORTS; index++)
1937 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
1938 MVPP2_PRS_PORT_LU_MAX, 0);
1939
1940 mvpp2_prs_def_flow_init(priv);
1941
1942 mvpp2_prs_mh_init(priv);
1943
1944 mvpp2_prs_mac_init(priv);
1945
1946 err = mvpp2_prs_etype_init(priv);
1947 if (err)
1948 return err;
1949
1950 return 0;
1951}
1952
1953/* Compare MAC DA with tcam entry data */
1954static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
1955 const u8 *da, unsigned char *mask)
1956{
1957 unsigned char tcam_byte, tcam_mask;
1958 int index;
1959
1960 for (index = 0; index < ETH_ALEN; index++) {
1961 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
1962 if (tcam_mask != mask[index])
1963 return false;
1964
1965 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
1966 return false;
1967 }
1968
1969 return true;
1970}
1971
1972/* Find tcam entry with matched pair <MAC DA, port> */
1973static struct mvpp2_prs_entry *
1974mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
1975 unsigned char *mask, int udf_type)
1976{
1977 struct mvpp2_prs_entry *pe;
1978 int tid;
1979
1980 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1981 if (!pe)
1982 return NULL;
1983 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
1984
1985	/* Go through all entries with MVPP2_PRS_LU_MAC */
1986 for (tid = MVPP2_PE_FIRST_FREE_TID;
1987 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1988 unsigned int entry_pmap;
1989
1990 if (!priv->prs_shadow[tid].valid ||
1991 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
1992 (priv->prs_shadow[tid].udf != udf_type))
1993 continue;
1994
1995 pe->index = tid;
1996 mvpp2_prs_hw_read(priv, pe);
1997 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
1998
1999 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2000 entry_pmap == pmap)
2001 return pe;
2002 }
2003 kfree(pe);
2004
2005 return NULL;
2006}
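/*
 * The entry returned above is kzalloc()ed; callers such as
 * mvpp2_prs_mac_da_accept() below are responsible for kfree()ing it once
 * they are done with it. NULL is returned when no matching entry exists.
 */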
2007
2008/* Update parser's mac da entry */
2009static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2010 const u8 *da, bool add)
2011{
2012 struct mvpp2_prs_entry *pe;
2013 unsigned int pmap, len, ri;
2014 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2015 int tid;
2016
2017	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2018 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2019 MVPP2_PRS_UDF_MAC_DEF);
2020
2021 /* No such entry */
2022 if (!pe) {
2023 if (!add)
2024 return 0;
2025
2026 /* Create new TCAM entry */
2027		/* Find the first range MAC entry */
2028 for (tid = MVPP2_PE_FIRST_FREE_TID;
2029 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2030 if (priv->prs_shadow[tid].valid &&
2031 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2032 (priv->prs_shadow[tid].udf ==
2033 MVPP2_PRS_UDF_MAC_RANGE))
2034 break;
2035
2036		/* Go through all entries from first to last */
2037 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2038 tid - 1);
2039 if (tid < 0)
2040 return tid;
2041
2042 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2043 if (!pe)
2044 return -1;
2045 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2046 pe->index = tid;
2047
2048 /* Mask all ports */
2049 mvpp2_prs_tcam_port_map_set(pe, 0);
2050 }
2051
2052 /* Update port mask */
2053 mvpp2_prs_tcam_port_set(pe, port, add);
2054
2055 /* Invalidate the entry if no ports are left enabled */
2056 pmap = mvpp2_prs_tcam_port_map_get(pe);
2057 if (pmap == 0) {
2058 if (add) {
2059 kfree(pe);
2060 return -1;
2061 }
2062 mvpp2_prs_hw_inv(priv, pe->index);
2063 priv->prs_shadow[pe->index].valid = false;
2064 kfree(pe);
2065 return 0;
2066 }
2067
2068 /* Continue - set next lookup */
2069 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2070
2071 /* Set match on DA */
2072 len = ETH_ALEN;
2073 while (len--)
2074 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2075
2076 /* Set result info bits */
2077 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2078
2079 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2080 MVPP2_PRS_RI_MAC_ME_MASK);
2081 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2082 MVPP2_PRS_RI_MAC_ME_MASK);
2083
2084 /* Shift to ethertype */
2085 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2086 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2087
2088 /* Update shadow table and hw entry */
2089 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2090 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2091 mvpp2_prs_hw_write(priv, pe);
2092
2093 kfree(pe);
2094
2095 return 0;
2096}
2097
2098static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2099{
2100 int err;
2101
2102 /* Remove old parser entry */
2103 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2104 false);
2105 if (err)
2106 return err;
2107
2108 /* Add new parser entry */
2109 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2110 if (err)
2111 return err;
2112
2113 /* Set addr in the device */
2114 memcpy(port->dev_addr, da, ETH_ALEN);
2115
2116 return 0;
2117}
2118
2119/* Set prs flow for the port */
2120static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2121{
2122 struct mvpp2_prs_entry *pe;
2123 int tid;
2124
2125 pe = mvpp2_prs_flow_find(port->priv, port->id);
2126
2127	/* No such entry exists */
2128 if (!pe) {
2129		/* Go through all entries from last to first */
2130 tid = mvpp2_prs_tcam_first_free(port->priv,
2131 MVPP2_PE_LAST_FREE_TID,
2132 MVPP2_PE_FIRST_FREE_TID);
2133 if (tid < 0)
2134 return tid;
2135
2136 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2137 if (!pe)
2138 return -ENOMEM;
2139
2140 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2141 pe->index = tid;
2142
2143 /* Set flow ID*/
2144 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2145 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2146
2147 /* Update shadow table */
2148 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2149 }
2150
2151 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2152 mvpp2_prs_hw_write(port->priv, pe);
2153 kfree(pe);
2154
2155 return 0;
2156}
2157
2158/* Classifier configuration routines */
2159
2160/* Update classification flow table registers */
2161static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2162 struct mvpp2_cls_flow_entry *fe)
2163{
2164 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2165 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2166 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2167 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2168}
2169
2170/* Update classification lookup table register */
2171static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2172 struct mvpp2_cls_lookup_entry *le)
2173{
2174 u32 val;
2175
2176 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2177 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2178 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2179}
2180
2181/* Classifier default initialization */
2182static void mvpp2_cls_init(struct mvpp2 *priv)
2183{
2184 struct mvpp2_cls_lookup_entry le;
2185 struct mvpp2_cls_flow_entry fe;
2186 int index;
2187
2188 /* Enable classifier */
2189 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2190
2191 /* Clear classifier flow table */
2192 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS);
2193 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2194 fe.index = index;
2195 mvpp2_cls_flow_write(priv, &fe);
2196 }
2197
2198 /* Clear classifier lookup table */
2199 le.data = 0;
2200 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2201 le.lkpid = index;
2202 le.way = 0;
2203 mvpp2_cls_lookup_write(priv, &le);
2204
2205 le.way = 1;
2206 mvpp2_cls_lookup_write(priv, &le);
2207 }
2208}
2209
2210static void mvpp2_cls_port_config(struct mvpp2_port *port)
2211{
2212 struct mvpp2_cls_lookup_entry le;
2213 u32 val;
2214
2215 /* Set way for the port */
2216 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2217 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2218 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2219
2220 /* Pick the entry to be accessed in lookup ID decoding table
2221 * according to the way and lkpid.
2222 */
2223 le.lkpid = port->id;
2224 le.way = 0;
2225 le.data = 0;
2226
2227 /* Set initial CPU queue for receiving packets */
2228 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2229 le.data |= port->first_rxq;
2230
2231 /* Disable classification engines */
2232 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2233
2234 /* Update lookup ID table entry */
2235 mvpp2_cls_lookup_write(port->priv, &le);
2236}
2237
2238/* Set CPU queue number for oversize packets */
2239static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2240{
2241 u32 val;
2242
2243 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2244 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2245
2246 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2247 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2248
2249 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2250 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2251 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2252}
2253
2254/* Buffer Manager configuration routines */
2255
2256/* Create pool */
2257static int mvpp2_bm_pool_create(struct udevice *dev,
2258 struct mvpp2 *priv,
2259 struct mvpp2_bm_pool *bm_pool, int size)
2260{
2261 u32 val;
2262
2263 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2264	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2265	if (!bm_pool->virt_addr)
2266 return -ENOMEM;
2267
2268	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2269			MVPP2_BM_POOL_PTR_ALIGN)) {
2270		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2271 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2272 return -ENOMEM;
2273 }
2274
2275 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2276		    bm_pool->dma_addr);
2277	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2278
2279 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2280 val |= MVPP2_BM_START_MASK;
2281 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2282
2283 bm_pool->type = MVPP2_BM_FREE;
2284 bm_pool->size = size;
2285 bm_pool->pkt_size = 0;
2286 bm_pool->buf_num = 0;
2287
2288 return 0;
2289}
2290
2291/* Set pool buffer size */
2292static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2293 struct mvpp2_bm_pool *bm_pool,
2294 int buf_size)
2295{
2296 u32 val;
2297
2298 bm_pool->buf_size = buf_size;
2299
2300 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2301 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2302}
2303
2304/* Free all buffers from the pool */
2305static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2306 struct mvpp2_bm_pool *bm_pool)
2307{
2308 bm_pool->buf_num = 0;
2309}
2310
2311/* Cleanup pool */
2312static int mvpp2_bm_pool_destroy(struct udevice *dev,
2313 struct mvpp2 *priv,
2314 struct mvpp2_bm_pool *bm_pool)
2315{
2316 u32 val;
2317
2318 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2319 if (bm_pool->buf_num) {
2320 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2321 return 0;
2322 }
2323
2324 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2325 val |= MVPP2_BM_STOP_MASK;
2326 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2327
2328 return 0;
2329}
2330
2331static int mvpp2_bm_pools_init(struct udevice *dev,
2332 struct mvpp2 *priv)
2333{
2334 int i, err, size;
2335 struct mvpp2_bm_pool *bm_pool;
2336
2337 /* Create all pools with maximum size */
2338 size = MVPP2_BM_POOL_SIZE_MAX;
2339 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2340 bm_pool = &priv->bm_pools[i];
2341 bm_pool->id = i;
2342 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2343 if (err)
2344 goto err_unroll_pools;
2345 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
2346 }
2347 return 0;
2348
2349err_unroll_pools:
2350	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2351 for (i = i - 1; i >= 0; i--)
2352 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2353 return err;
2354}
2355
2356static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2357{
2358 int i, err;
2359
2360 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2361 /* Mask BM all interrupts */
2362 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2363 /* Clear BM cause register */
2364 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2365 }
2366
2367 /* Allocate and initialize BM pools */
2368 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2369 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2370 if (!priv->bm_pools)
2371 return -ENOMEM;
2372
2373 err = mvpp2_bm_pools_init(dev, priv);
2374 if (err < 0)
2375 return err;
2376 return 0;
2377}
2378
2379/* Attach long pool to rxq */
2380static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2381 int lrxq, int long_pool)
2382{
2383 u32 val;
2384 int prxq;
2385
2386 /* Get queue physical ID */
2387 prxq = port->rxqs[lrxq]->id;
2388
2389 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2390 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
2391 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
2392 MVPP2_RXQ_POOL_LONG_MASK);
2393
2394 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2395}
2396
2397/* Set pool number in a BM cookie */
2398static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2399{
2400 u32 bm;
2401
2402 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2403 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2404
2405 return bm;
2406}
2407
2408/* Get pool number from a BM cookie */
2409static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2410{
2411 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2412}
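/*
 * The BM "cookie" handled by the two helpers above packs the pool id at
 * MVPP2_BM_COOKIE_POOL_OFFS and the CPU id at MVPP2_BM_COOKIE_CPU_OFFS;
 * see mvpp2_bm_cookie_build() further down, which extracts the pool id
 * from the RX descriptor status word and the CPU id from
 * smp_processor_id().
 */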
2413
2414/* Release buffer to BM */
2415static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2416				     dma_addr_t buf_dma_addr,
2417				     unsigned long buf_phys_addr)
2418{
2419	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2420 * returned in the "cookie" field of the RX
2421 * descriptor. Instead of storing the virtual address, we
2422 * store the physical address
2423 */
2424 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2425	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
2426}
2427
2428/* Refill BM pool */
2429static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2430			      dma_addr_t dma_addr,
2431			      phys_addr_t phys_addr)
2432{
2433 int pool = mvpp2_bm_cookie_pool_get(bm);
2434
2435	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2436}
2437
2438/* Allocate buffers for the pool */
2439static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2440 struct mvpp2_bm_pool *bm_pool, int buf_num)
2441{
2442 int i;
2443
2444 if (buf_num < 0 ||
2445 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2446 netdev_err(port->dev,
2447 "cannot allocate %d buffers for pool %d\n",
2448 buf_num, bm_pool->id);
2449 return 0;
2450 }
2451
2452	for (i = 0; i < buf_num; i++) {
2453		mvpp2_bm_pool_put(port, bm_pool->id,
2454				  (dma_addr_t)buffer_loc.rx_buffer[i],
2455				  (unsigned long)buffer_loc.rx_buffer[i]);
2456
2457	}
2458
2459 /* Update BM driver with number of buffers added to pool */
2460 bm_pool->buf_num += i;
2461 bm_pool->in_use_thresh = bm_pool->buf_num / 4;
2462
2463 return i;
2464}
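/*
 * In this U-Boot port the RX buffers come from the statically reserved
 * buffer_loc.rx_buffer[] array; the same address is handed to the BM both
 * as the DMA address and as the cookie value.
 */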
2465
2466/* Notify the driver that BM pool is being used as specific type and return the
2467 * pool pointer on success
2468 */
2469static struct mvpp2_bm_pool *
2470mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2471 int pkt_size)
2472{
2473 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2474 int num;
2475
2476 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2477 netdev_err(port->dev, "mixing pool types is forbidden\n");
2478 return NULL;
2479 }
2480
2481 if (new_pool->type == MVPP2_BM_FREE)
2482 new_pool->type = type;
2483
2484 /* Allocate buffers in case BM pool is used as long pool, but packet
2485	 * size doesn't match MTU or BM pool hasn't been used yet
2486 */
2487 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2488 (new_pool->pkt_size == 0)) {
2489 int pkts_num;
2490
2491 /* Set default buffer number or free all the buffers in case
2492 * the pool is not empty
2493 */
2494 pkts_num = new_pool->buf_num;
2495 if (pkts_num == 0)
2496 pkts_num = type == MVPP2_BM_SWF_LONG ?
2497 MVPP2_BM_LONG_BUF_NUM :
2498 MVPP2_BM_SHORT_BUF_NUM;
2499 else
2500 mvpp2_bm_bufs_free(NULL,
2501 port->priv, new_pool);
2502
2503 new_pool->pkt_size = pkt_size;
2504
2505 /* Allocate buffers for this pool */
2506 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2507 if (num != pkts_num) {
2508			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2509 new_pool->id, num, pkts_num);
2510 return NULL;
2511 }
2512 }
2513
2514 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
2515 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
2516
2517 return new_pool;
2518}
2519
2520/* Initialize pools for swf */
2521static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2522{
2523 int rxq;
2524
2525 if (!port->pool_long) {
2526 port->pool_long =
2527 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2528 MVPP2_BM_SWF_LONG,
2529 port->pkt_size);
2530 if (!port->pool_long)
2531 return -ENOMEM;
2532
2533 port->pool_long->port_map |= (1 << port->id);
2534
2535 for (rxq = 0; rxq < rxq_number; rxq++)
2536 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2537 }
2538
2539 return 0;
2540}
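/*
 * Note: only the long pool is set up here; no short pool is attached, so
 * every RX queue of the port uses the long pool configured above.
 */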
2541
2542/* Port configuration routines */
2543
2544static void mvpp2_port_mii_set(struct mvpp2_port *port)
2545{
2546 u32 val;
2547
2548 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2549
2550 switch (port->phy_interface) {
2551 case PHY_INTERFACE_MODE_SGMII:
2552 val |= MVPP2_GMAC_INBAND_AN_MASK;
2553 break;
2554 case PHY_INTERFACE_MODE_RGMII:
2555 val |= MVPP2_GMAC_PORT_RGMII_MASK;
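		/* No break: RGMII appears to deliberately fall through so
		 * the PCS is also disabled for this mode.
		 */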
2556 default:
2557 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2558 }
2559
2560 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2561}
2562
2563static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2564{
2565 u32 val;
2566
2567 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2568 val |= MVPP2_GMAC_FC_ADV_EN;
2569 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2570}
2571
2572static void mvpp2_port_enable(struct mvpp2_port *port)
2573{
2574 u32 val;
2575
2576 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2577 val |= MVPP2_GMAC_PORT_EN_MASK;
2578 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2579 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2580}
2581
2582static void mvpp2_port_disable(struct mvpp2_port *port)
2583{
2584 u32 val;
2585
2586 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2587 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2588 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2589}
2590
2591/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2592static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2593{
2594 u32 val;
2595
2596 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2597 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2598 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2599}
2600
2601/* Configure loopback port */
2602static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2603{
2604 u32 val;
2605
2606 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2607
2608 if (port->speed == 1000)
2609 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2610 else
2611 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2612
2613 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2614 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2615 else
2616 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2617
2618 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2619}
2620
2621static void mvpp2_port_reset(struct mvpp2_port *port)
2622{
2623 u32 val;
2624
2625 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2626 ~MVPP2_GMAC_PORT_RESET_MASK;
2627 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2628
2629 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2630 MVPP2_GMAC_PORT_RESET_MASK)
2631 continue;
2632}
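/*
 * Note: the function above de-asserts the GMAC port reset bit and then
 * busy-waits until the hardware reports the port as out of reset.
 */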
2633
2634/* Change maximum receive size of the port */
2635static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2636{
2637 u32 val;
2638
2639 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2640 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2641 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2642 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2643 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2644}
2645
2646/* Set defaults to the MVPP2 port */
2647static void mvpp2_defaults_set(struct mvpp2_port *port)
2648{
2649 int tx_port_num, val, queue, ptxq, lrxq;
2650
2651 /* Configure port to loopback if needed */
2652 if (port->flags & MVPP2_F_LOOPBACK)
2653 mvpp2_port_loopback_set(port);
2654
2655 /* Update TX FIFO MIN Threshold */
2656 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2657 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2658 /* Min. TX threshold must be less than minimal packet length */
2659 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2660 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2661
2662 /* Disable Legacy WRR, Disable EJP, Release from reset */
2663 tx_port_num = mvpp2_egress_port(port);
2664 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2665 tx_port_num);
2666 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2667
2668 /* Close bandwidth for all queues */
2669 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
2670 ptxq = mvpp2_txq_phys(port->id, queue);
2671 mvpp2_write(port->priv,
2672 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
2673 }
2674
2675 /* Set refill period to 1 usec, refill tokens
2676 * and bucket size to maximum
2677 */
2678 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
2679 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2680 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2681 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2682 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2683 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2684 val = MVPP2_TXP_TOKEN_SIZE_MAX;
2685 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2686
2687 /* Set MaximumLowLatencyPacketSize value to 256 */
2688 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2689 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2690 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2691
2692 /* Enable Rx cache snoop */
2693 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2694 queue = port->rxqs[lrxq]->id;
2695 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2696 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2697 MVPP2_SNOOP_BUF_HDR_MASK;
2698 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2699 }
2700}
2701
2702/* Enable/disable receiving packets */
2703static void mvpp2_ingress_enable(struct mvpp2_port *port)
2704{
2705 u32 val;
2706 int lrxq, queue;
2707
2708 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2709 queue = port->rxqs[lrxq]->id;
2710 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2711 val &= ~MVPP2_RXQ_DISABLE_MASK;
2712 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2713 }
2714}
2715
2716static void mvpp2_ingress_disable(struct mvpp2_port *port)
2717{
2718 u32 val;
2719 int lrxq, queue;
2720
2721 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
2722 queue = port->rxqs[lrxq]->id;
2723 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2724 val |= MVPP2_RXQ_DISABLE_MASK;
2725 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2726 }
2727}
2728
2729/* Enable transmit via physical egress queue
2730 * - HW starts to take descriptors from DRAM
2731 */
2732static void mvpp2_egress_enable(struct mvpp2_port *port)
2733{
2734 u32 qmap;
2735 int queue;
2736 int tx_port_num = mvpp2_egress_port(port);
2737
2738 /* Enable all initialized TXs. */
2739 qmap = 0;
2740 for (queue = 0; queue < txq_number; queue++) {
2741 struct mvpp2_tx_queue *txq = port->txqs[queue];
2742
2743 if (txq->descs != NULL)
2744 qmap |= (1 << queue);
2745 }
2746
2747 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2748 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2749}
2750
2751/* Disable transmit via physical egress queue
2752 * - HW doesn't take descriptors from DRAM
2753 */
2754static void mvpp2_egress_disable(struct mvpp2_port *port)
2755{
2756 u32 reg_data;
2757 int delay;
2758 int tx_port_num = mvpp2_egress_port(port);
2759
2760 /* Issue stop command for active channels only */
2761 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2762 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2763 MVPP2_TXP_SCHED_ENQ_MASK;
2764 if (reg_data != 0)
2765 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2766 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2767
2768 /* Wait for all Tx activity to terminate. */
2769 delay = 0;
2770 do {
2771 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2772 netdev_warn(port->dev,
2773 "Tx stop timed out, status=0x%08x\n",
2774 reg_data);
2775 break;
2776 }
2777 mdelay(1);
2778 delay++;
2779
2780 /* Check port TX Command register that all
2781 * Tx queues are stopped
2782 */
2783 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2784 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2785}
2786
2787/* Rx descriptors helper methods */
2788
2789/* Get number of Rx descriptors occupied by received packets */
2790static inline int
2791mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2792{
2793 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2794
2795 return val & MVPP2_RXQ_OCCUPIED_MASK;
2796}
2797
2798/* Update Rx queue status with the number of occupied and available
2799 * Rx descriptor slots.
2800 */
2801static inline void
2802mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2803 int used_count, int free_count)
2804{
2805	/* Decrement the number of used descriptors and increment the
2806	 * number of free descriptors.
2807 */
2808 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2809
2810 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2811}
2812
2813/* Get pointer to next RX descriptor to be processed by SW */
2814static inline struct mvpp2_rx_desc *
2815mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2816{
2817 int rx_desc = rxq->next_desc_to_proc;
2818
2819 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2820 prefetch(rxq->descs + rxq->next_desc_to_proc);
2821 return rxq->descs + rx_desc;
2822}
2823
2824/* Set rx queue offset */
2825static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2826 int prxq, int offset)
2827{
2828 u32 val;
2829
2830 /* Convert offset from bytes to units of 32 bytes */
2831 offset = offset >> 5;
2832
2833 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2834 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2835
2836	/* Offset is in units of 32 bytes */
2837 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2838 MVPP2_RXQ_PACKET_OFFSET_MASK);
2839
2840 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2841}
2842
2843/* Obtain BM cookie information from descriptor */
2844static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
2845				 struct mvpp2_rx_desc *rx_desc)
2846{
2847	int cpu = smp_processor_id();
2848	int pool;
2849
2850 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
2851 MVPP2_RXD_BM_POOL_ID_MASK) >>
2852 MVPP2_RXD_BM_POOL_ID_OFFS;
2853
2854 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
2855 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
2856}
2857
2858/* Tx descriptors helper methods */
2859
2860/* Get number of Tx descriptors waiting to be transmitted by HW */
2861static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
2862 struct mvpp2_tx_queue *txq)
2863{
2864 u32 val;
2865
2866 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
2867 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
2868
2869 return val & MVPP2_TXQ_PENDING_MASK;
2870}
2871
2872/* Get pointer to next Tx descriptor to be processed (send) by HW */
2873static struct mvpp2_tx_desc *
2874mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2875{
2876 int tx_desc = txq->next_desc_to_proc;
2877
2878 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2879 return txq->descs + tx_desc;
2880}
2881
2882/* Update HW with number of aggregated Tx descriptors to be sent */
2883static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2884{
2885 /* aggregated access - relevant TXQ number is written in TX desc */
2886 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2887}
2888
2889/* Get number of sent descriptors and decrement counter.
2890 * The number of sent descriptors is returned.
2891 * Per-CPU access
2892 */
2893static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2894 struct mvpp2_tx_queue *txq)
2895{
2896 u32 val;
2897
2898 /* Reading status reg resets transmitted descriptor counter */
2899 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
2900
2901 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2902 MVPP2_TRANSMITTED_COUNT_OFFSET;
2903}
2904
2905static void mvpp2_txq_sent_counter_clear(void *arg)
2906{
2907 struct mvpp2_port *port = arg;
2908 int queue;
2909
2910 for (queue = 0; queue < txq_number; queue++) {
2911 int id = port->txqs[queue]->id;
2912
2913 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
2914 }
2915}
2916
2917/* Set max sizes for Tx queues */
2918static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2919{
2920 u32 val, size, mtu;
2921 int txq, tx_port_num;
2922
2923 mtu = port->pkt_size * 8;
2924 if (mtu > MVPP2_TXP_MTU_MAX)
2925 mtu = MVPP2_TXP_MTU_MAX;
2926
2927 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2928 mtu = 3 * mtu;
2929
2930 /* Indirect access to registers */
2931 tx_port_num = mvpp2_egress_port(port);
2932 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2933
2934 /* Set MTU */
2935 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2936 val &= ~MVPP2_TXP_MTU_MAX;
2937 val |= mtu;
2938 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2939
2940	/* TXP token size and all TXQs token size must be larger than MTU */
2941 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2942 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2943 if (size < mtu) {
2944 size = mtu;
2945 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2946 val |= size;
2947 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2948 }
2949
2950 for (txq = 0; txq < txq_number; txq++) {
2951 val = mvpp2_read(port->priv,
2952 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2953 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2954
2955 if (size < mtu) {
2956 size = mtu;
2957 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2958 val |= size;
2959 mvpp2_write(port->priv,
2960 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2961 val);
2962 }
2963 }
2964}
2965
2966/* Free Tx queue skbuffs */
2967static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2968 struct mvpp2_tx_queue *txq,
2969 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2970{
2971 int i;
2972
2973 for (i = 0; i < num; i++)
2974 mvpp2_txq_inc_get(txq_pcpu);
2975}
2976
2977static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2978 u32 cause)
2979{
2980 int queue = fls(cause) - 1;
2981
2982 return port->rxqs[queue];
2983}
2984
2985static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2986 u32 cause)
2987{
2988 int queue = fls(cause) - 1;
2989
2990 return port->txqs[queue];
2991}
2992
2993/* Rx/Tx queue initialization/cleanup methods */
2994
2995/* Allocate and initialize descriptors for aggr TXQ */
2996static int mvpp2_aggr_txq_init(struct udevice *dev,
2997 struct mvpp2_tx_queue *aggr_txq,
2998 int desc_num, int cpu,
2999 struct mvpp2 *priv)
3000{
3001 /* Allocate memory for TX descriptors */
3002 aggr_txq->descs = buffer_loc.aggr_tx_descs;
3003	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
3004	if (!aggr_txq->descs)
3005 return -ENOMEM;
3006
3007 /* Make sure descriptor address is cache line size aligned */
3008 BUG_ON(aggr_txq->descs !=
3009 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3010
3011 aggr_txq->last_desc = aggr_txq->size - 1;
3012
3013 /* Aggr TXQ no reset WA */
3014 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
3015 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
3016
3017 /* Set Tx descriptors queue starting address */
3018 /* indirect access */
3019 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
3020		    aggr_txq->descs_dma);
3021	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
3022
3023 return 0;
3024}
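/*
 * As with the BM pools, the aggregated TXQ descriptors live in the
 * statically reserved buffer_loc area rather than in dynamically
 * allocated DMA memory; the BUG_ON above only verifies that this area is
 * cache-line aligned.
 */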
3025
3026/* Create a specified Rx queue */
3027static int mvpp2_rxq_init(struct mvpp2_port *port,
3028 struct mvpp2_rx_queue *rxq)
3029
3030{
3031 rxq->size = port->rx_ring_size;
3032
3033 /* Allocate memory for RX descriptors */
3034 rxq->descs = buffer_loc.rx_descs;
3035	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
3036	if (!rxq->descs)
3037 return -ENOMEM;
3038
3039 BUG_ON(rxq->descs !=
3040 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3041
3042 rxq->last_desc = rxq->size - 1;
3043
3044 /* Zero occupied and non-occupied counters - direct access */
3045 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3046
3047 /* Set Rx descriptors queue starting address - indirect access */
3048 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3049	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma);
3050	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
3051 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
3052
3053 /* Set Offset */
3054 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
3055
3056 /* Add number of descriptors ready for receiving packets */
3057 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
3058
3059 return 0;
3060}
3061
3062/* Push packets received by the RXQ to BM pool */
3063static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3064 struct mvpp2_rx_queue *rxq)
3065{
3066 int rx_received, i;
3067
3068 rx_received = mvpp2_rxq_received(port, rxq->id);
3069 if (!rx_received)
3070 return;
3071
3072 for (i = 0; i < rx_received; i++) {
3073 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3074		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
3075
3076		mvpp2_pool_refill(port, bm,
3077				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3078				  mvpp2_rxdesc_cookie_get(port, rx_desc));
3079	}
3080 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3081}
3082
3083/* Cleanup Rx queue */
3084static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3085 struct mvpp2_rx_queue *rxq)
3086{
3087 mvpp2_rxq_drop_pkts(port, rxq);
3088
3089 rxq->descs = NULL;
3090 rxq->last_desc = 0;
3091 rxq->next_desc_to_proc = 0;
3092	rxq->descs_dma = 0;
3093
3094 /* Clear Rx descriptors queue starting address and size;
3095 * free descriptor number
3096 */
3097 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3098 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
3099 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
3100 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
3101}
3102
3103/* Create and initialize a Tx queue */
3104static int mvpp2_txq_init(struct mvpp2_port *port,
3105 struct mvpp2_tx_queue *txq)
3106{
3107 u32 val;
3108 int cpu, desc, desc_per_txq, tx_port_num;
3109 struct mvpp2_txq_pcpu *txq_pcpu;
3110
3111 txq->size = port->tx_ring_size;
3112
3113 /* Allocate memory for Tx descriptors */
3114 txq->descs = buffer_loc.tx_descs;
3115	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
3116	if (!txq->descs)
3117 return -ENOMEM;
3118
3119 /* Make sure descriptor address is cache line size aligned */
3120 BUG_ON(txq->descs !=
3121 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
3122
3123 txq->last_desc = txq->size - 1;
3124
3125 /* Set Tx descriptors queue starting address - indirect access */
3126 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3127	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
3128	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
3129 MVPP2_TXQ_DESC_SIZE_MASK);
3130 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
3131 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
3132 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3133 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
3134 val &= ~MVPP2_TXQ_PENDING_MASK;
3135 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
3136
3137 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
3138 * for each existing TXQ.
3139 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
3140	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
3141 */
3142 desc_per_txq = 16;
3143 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3144 (txq->log_id * desc_per_txq);
3145
3146 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
3147 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3148 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
3149
3150 /* WRR / EJP configuration - indirect access */
3151 tx_port_num = mvpp2_egress_port(port);
3152 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3153
3154 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3155 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3156 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3157 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3158 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3159
3160 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3161 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3162 val);
3163
3164 for_each_present_cpu(cpu) {
3165 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3166 txq_pcpu->size = txq->size;
3167 }
3168
3169 return 0;
3170}
3171
3172/* Free allocated TXQ resources */
3173static void mvpp2_txq_deinit(struct mvpp2_port *port,
3174 struct mvpp2_tx_queue *txq)
3175{
3176 txq->descs = NULL;
3177 txq->last_desc = 0;
3178 txq->next_desc_to_proc = 0;
3179	txq->descs_dma = 0;
3180
3181 /* Set minimum bandwidth for disabled TXQs */
3182 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
3183
3184 /* Set Tx descriptors queue starting address and size */
3185 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3186 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
3187 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
3188}
3189
3190/* Cleanup a Tx queue */
3191static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3192{
3193 struct mvpp2_txq_pcpu *txq_pcpu;
3194 int delay, pending, cpu;
3195 u32 val;
3196
3197 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3198 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3199 val |= MVPP2_TXQ_DRAIN_EN_MASK;
3200 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3201
3202	/* The queue has been stopped, so wait for all pending packets
3203	 * to be transmitted.
3204 */
3205 delay = 0;
3206 do {
3207 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3208 netdev_warn(port->dev,
3209 "port %d: cleaning queue %d timed out\n",
3210 port->id, txq->log_id);
3211 break;
3212 }
3213 mdelay(1);
3214 delay++;
3215
3216 pending = mvpp2_txq_pend_desc_num_get(port, txq);
3217 } while (pending);
3218
3219 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3220 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
3221
3222 for_each_present_cpu(cpu) {
3223 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3224
3225 /* Release all packets */
3226 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3227
3228 /* Reset queue */
3229 txq_pcpu->count = 0;
3230 txq_pcpu->txq_put_index = 0;
3231 txq_pcpu->txq_get_index = 0;
3232 }
3233}
3234
3235/* Cleanup all Tx queues */
3236static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3237{
3238 struct mvpp2_tx_queue *txq;
3239 int queue;
3240 u32 val;
3241
3242 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3243
3244 /* Reset Tx ports and delete Tx queues */
3245 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3246 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3247
3248 for (queue = 0; queue < txq_number; queue++) {
3249 txq = port->txqs[queue];
3250 mvpp2_txq_clean(port, txq);
3251 mvpp2_txq_deinit(port, txq);
3252 }
3253
3254 mvpp2_txq_sent_counter_clear(port);
3255
3256 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3257 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3258}
3259
3260/* Cleanup all Rx queues */
3261static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3262{
3263 int queue;
3264
3265 for (queue = 0; queue < rxq_number; queue++)
3266 mvpp2_rxq_deinit(port, port->rxqs[queue]);
3267}
3268
3269/* Init all Rx queues for port */
3270static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3271{
3272 int queue, err;
3273
3274 for (queue = 0; queue < rxq_number; queue++) {
3275 err = mvpp2_rxq_init(port, port->rxqs[queue]);
3276 if (err)
3277 goto err_cleanup;
3278 }
3279 return 0;
3280
3281err_cleanup:
3282 mvpp2_cleanup_rxqs(port);
3283 return err;
3284}
3285
3286/* Init all tx queues for port */
3287static int mvpp2_setup_txqs(struct mvpp2_port *port)
3288{
3289 struct mvpp2_tx_queue *txq;
3290 int queue, err;
3291
3292 for (queue = 0; queue < txq_number; queue++) {
3293 txq = port->txqs[queue];
3294 err = mvpp2_txq_init(port, txq);
3295 if (err)
3296 goto err_cleanup;
3297 }
3298
3299 mvpp2_txq_sent_counter_clear(port);
3300 return 0;
3301
3302err_cleanup:
3303 mvpp2_cleanup_txqs(port);
3304 return err;
3305}
3306
3307/* Adjust link */
3308static void mvpp2_link_event(struct mvpp2_port *port)
3309{
3310 struct phy_device *phydev = port->phy_dev;
3311 int status_change = 0;
3312 u32 val;
3313
3314 if (phydev->link) {
3315 if ((port->speed != phydev->speed) ||
3316 (port->duplex != phydev->duplex)) {
3317 u32 val;
3318
3319 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3320 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
3321 MVPP2_GMAC_CONFIG_GMII_SPEED |
3322 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3323 MVPP2_GMAC_AN_SPEED_EN |
3324 MVPP2_GMAC_AN_DUPLEX_EN);
3325
3326 if (phydev->duplex)
3327 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
3328
3329 if (phydev->speed == SPEED_1000)
3330 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
3331 else if (phydev->speed == SPEED_100)
3332 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
3333
3334 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3335
3336 port->duplex = phydev->duplex;
3337 port->speed = phydev->speed;
3338 }
3339 }
3340
3341 if (phydev->link != port->link) {
3342 if (!phydev->link) {
3343 port->duplex = -1;
3344 port->speed = 0;
3345 }
3346
3347 port->link = phydev->link;
3348 status_change = 1;
3349 }
3350
3351 if (status_change) {
3352 if (phydev->link) {
3353 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3354 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
3355 MVPP2_GMAC_FORCE_LINK_DOWN);
3356 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3357 mvpp2_egress_enable(port);
3358 mvpp2_ingress_enable(port);
3359 } else {
3360 mvpp2_ingress_disable(port);
3361 mvpp2_egress_disable(port);
3362 }
3363 }
3364}
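/*
 * This is called from mvpp2_open() after phy_startup(): it copies the
 * negotiated speed/duplex from the phylib phy_device into the GMAC
 * autoneg-config register and enables egress/ingress once the link is
 * reported up, or disables them when it goes down.
 */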
3365
3366/* Main RX/TX processing routines */
3367
3368/* Display more error info */
3369static void mvpp2_rx_error(struct mvpp2_port *port,
3370 struct mvpp2_rx_desc *rx_desc)
3371{
3372	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3373	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3374
3375 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3376 case MVPP2_RXD_ERR_CRC:
3377		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
3378			   status, sz);
3379		break;
3380 case MVPP2_RXD_ERR_OVERRUN:
3381		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
3382			   status, sz);
3383		break;
3384 case MVPP2_RXD_ERR_RESOURCE:
3385		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
3386			   status, sz);
3387		break;
3388 }
3389}
3390
3391/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
3392static int mvpp2_rx_refill(struct mvpp2_port *port,
3393 struct mvpp2_bm_pool *bm_pool,
3394			   u32 bm, dma_addr_t dma_addr)
3395{
3396	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
3397	return 0;
3398}
3399
3400/* Set hw internals when starting port */
3401static void mvpp2_start_dev(struct mvpp2_port *port)
3402{
3403 mvpp2_gmac_max_rx_size_set(port);
3404 mvpp2_txp_max_tx_size_set(port);
3405
3406 mvpp2_port_enable(port);
3407}
3408
3409/* Set hw internals when stopping port */
3410static void mvpp2_stop_dev(struct mvpp2_port *port)
3411{
3412 /* Stop new packets from arriving to RXQs */
3413 mvpp2_ingress_disable(port);
3414
3415 mvpp2_egress_disable(port);
3416 mvpp2_port_disable(port);
3417}
3418
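/*
 * Connect and start the PHY on the first open; on later opens the PHY is
 * already up, so only the egress/ingress paths are re-enabled.
 */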
3419static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
3420{
3421 struct phy_device *phy_dev;
3422
3423 if (!port->init || port->link == 0) {
3424 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
3425 port->phy_interface);
3426 port->phy_dev = phy_dev;
3427 if (!phy_dev) {
3428 netdev_err(port->dev, "cannot connect to phy\n");
3429 return -ENODEV;
3430 }
3431 phy_dev->supported &= PHY_GBIT_FEATURES;
3432 phy_dev->advertising = phy_dev->supported;
3433
3434 port->phy_dev = phy_dev;
3435 port->link = 0;
3436 port->duplex = 0;
3437 port->speed = 0;
3438
3439 phy_config(phy_dev);
3440 phy_startup(phy_dev);
3441 if (!phy_dev->link) {
3442 printf("%s: No link\n", phy_dev->dev->name);
3443 return -1;
3444 }
3445
3446 port->init = 1;
3447 } else {
3448 mvpp2_egress_enable(port);
3449 mvpp2_ingress_enable(port);
3450 }
3451
3452 return 0;
3453}
3454
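/*
 * Port open sequence: install parser entries for the broadcast and port MAC
 * addresses, set the default flow, allocate the Rx/Tx queues, connect the
 * PHY and finally enable the MAC.
 */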
3455static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
3456{
3457 unsigned char mac_bcast[ETH_ALEN] = {
3458 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3459 int err;
3460
3461 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
3462 if (err) {
3463 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3464 return err;
3465 }
3466 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
3467 port->dev_addr, true);
3468 if (err) {
3469 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
3470 return err;
3471 }
3472 err = mvpp2_prs_def_flow(port);
3473 if (err) {
3474 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3475 return err;
3476 }
3477
3478 /* Allocate the Rx/Tx queues */
3479 err = mvpp2_setup_rxqs(port);
3480 if (err) {
3481 netdev_err(port->dev, "cannot allocate Rx queues\n");
3482 return err;
3483 }
3484
3485 err = mvpp2_setup_txqs(port);
3486 if (err) {
3487 netdev_err(port->dev, "cannot allocate Tx queues\n");
3488 return err;
3489 }
3490
3491 err = mvpp2_phy_connect(dev, port);
3492 if (err < 0)
3493 return err;
3494
3495 mvpp2_link_event(port);
3496
3497 mvpp2_start_dev(port);
3498
3499 return 0;
3500}
3501
3502/* No Device ops here in U-Boot */
3503
3504/* Driver initialization */
3505
3506static void mvpp2_port_power_up(struct mvpp2_port *port)
3507{
3508 mvpp2_port_mii_set(port);
3509 mvpp2_port_periodic_xon_disable(port);
3510 mvpp2_port_fc_adv_enable(port);
3511 mvpp2_port_reset(port);
3512}
3513
3514/* Initialize port HW */
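/*
 * The queue mapping is fixed: physical Tx queue IDs are derived from the
 * port ID via mvpp2_txq_phys(), and each port owns a contiguous block of
 * physical Rx queues starting at port->first_rxq.
 */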
3515static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
3516{
3517 struct mvpp2 *priv = port->priv;
3518 struct mvpp2_txq_pcpu *txq_pcpu;
3519 int queue, cpu, err;
3520
3521 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
3522 return -EINVAL;
3523
3524 /* Disable port */
3525 mvpp2_egress_disable(port);
3526 mvpp2_port_disable(port);
3527
3528 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
3529 GFP_KERNEL);
3530 if (!port->txqs)
3531 return -ENOMEM;
3532
3533	/* Associate physical Tx queues with this port and initialize them.
3534 * The mapping is predefined.
3535 */
3536 for (queue = 0; queue < txq_number; queue++) {
3537 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
3538 struct mvpp2_tx_queue *txq;
3539
3540 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
3541 if (!txq)
3542 return -ENOMEM;
3543
3544 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
3545 GFP_KERNEL);
3546 if (!txq->pcpu)
3547 return -ENOMEM;
3548
3549 txq->id = queue_phy_id;
3550 txq->log_id = queue;
3551 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
3552 for_each_present_cpu(cpu) {
3553 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
3554 txq_pcpu->cpu = cpu;
3555 }
3556
3557 port->txqs[queue] = txq;
3558 }
3559
3560 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
3561 GFP_KERNEL);
3562 if (!port->rxqs)
3563 return -ENOMEM;
3564
3565 /* Allocate and initialize Rx queue for this port */
3566 for (queue = 0; queue < rxq_number; queue++) {
3567 struct mvpp2_rx_queue *rxq;
3568
3569 /* Map physical Rx queue to port's logical Rx queue */
3570 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
3571 if (!rxq)
3572 return -ENOMEM;
3573 /* Map this Rx queue to a physical queue */
3574 rxq->id = port->first_rxq + queue;
3575 rxq->port = port->id;
3576 rxq->logic_rxq = queue;
3577
3578 port->rxqs[queue] = rxq;
3579 }
3580
3581 /* Configure Rx queue group interrupt for this port */
3582 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ);
3583
3584 /* Create Rx descriptor rings */
3585 for (queue = 0; queue < rxq_number; queue++) {
3586 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3587
3588 rxq->size = port->rx_ring_size;
3589 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
3590 rxq->time_coal = MVPP2_RX_COAL_USEC;
3591 }
3592
3593 mvpp2_ingress_disable(port);
3594
3595 /* Port default configuration */
3596 mvpp2_defaults_set(port);
3597
3598 /* Port's classifier configuration */
3599 mvpp2_cls_oversize_rxq_set(port);
3600 mvpp2_cls_port_config(port);
3601
3602 /* Provide an initial Rx packet size */
3603 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);
3604
3605	/* Initialize BM pools for software forwarding (swf) */
3606 err = mvpp2_swf_bm_pool_init(port);
3607 if (err)
3608 return err;
3609
3610 return 0;
3611}
3612
3613/* Ports initialization */
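/*
 * Per-port probe: read the "phy", "phy-mode" and "port-id" properties from
 * the device tree, map the port registers (which follow the two common
 * register regions), initialize the port HW and reserve its Rx queue block.
 */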
3614static int mvpp2_port_probe(struct udevice *dev,
3615 struct mvpp2_port *port,
3616 int port_node,
3617 struct mvpp2 *priv,
3618 int *next_first_rxq)
3619{
3620 int phy_node;
3621 u32 id;
3622 u32 phyaddr;
3623 const char *phy_mode_str;
3624 int phy_mode = -1;
3625 int priv_common_regs_num = 2;
3626 int err;
3627
3628 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
3629 if (phy_node < 0) {
3630 dev_err(&pdev->dev, "missing phy\n");
3631 return -ENODEV;
3632 }
3633
3634 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
3635 if (phy_mode_str)
3636 phy_mode = phy_get_interface_by_name(phy_mode_str);
3637 if (phy_mode == -1) {
3638 dev_err(&pdev->dev, "incorrect phy mode\n");
3639 return -EINVAL;
3640 }
3641
3642 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
3643 if (id == -1) {
3644 dev_err(&pdev->dev, "missing port-id value\n");
3645 return -EINVAL;
3646 }
3647
3648 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
3649
3650 port->priv = priv;
3651 port->id = id;
3652 port->first_rxq = *next_first_rxq;
3653 port->phy_node = phy_node;
3654 port->phy_interface = phy_mode;
3655 port->phyaddr = phyaddr;
3656
3657 port->base = (void __iomem *)dev_get_addr_index(dev->parent,
3658 priv_common_regs_num
3659 + id);
3660 if (IS_ERR(port->base))
3661 return PTR_ERR(port->base);
3662
3663 port->tx_ring_size = MVPP2_MAX_TXD;
3664 port->rx_ring_size = MVPP2_MAX_RXD;
3665
3666 err = mvpp2_port_init(dev, port);
3667 if (err < 0) {
3668 dev_err(&pdev->dev, "failed to init port %d\n", id);
3669 return err;
3670 }
3671 mvpp2_port_power_up(port);
3672
3673 /* Increment the first Rx queue number to be used by the next port */
3674 *next_first_rxq += CONFIG_MV_ETH_RXQ;
3675 priv->port_list[id] = port;
3676 return 0;
3677}
3678
3679/* Initialize decoding windows */
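/*
 * Program one MBUS address decoding window per DRAM chip-select so the
 * controller can master DRAM; unused windows are cleared and the active
 * ones are flagged in MVPP2_BASE_ADDR_ENABLE.
 */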
3680static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
3681 struct mvpp2 *priv)
3682{
3683 u32 win_enable;
3684 int i;
3685
3686 for (i = 0; i < 6; i++) {
3687 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
3688 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
3689
3690 if (i < 4)
3691 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
3692 }
3693
3694 win_enable = 0;
3695
3696 for (i = 0; i < dram->num_cs; i++) {
3697 const struct mbus_dram_window *cs = dram->cs + i;
3698
3699 mvpp2_write(priv, MVPP2_WIN_BASE(i),
3700 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
3701 dram->mbus_dram_target_id);
3702
3703 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
3704 (cs->size - 1) & 0xffff0000);
3705
3706 win_enable |= (1 << i);
3707 }
3708
3709 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
3710}
3711
3712/* Initialize Rx FIFOs */
3713static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
3714{
3715 int port;
3716
3717 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
3718 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
3719 MVPP2_RX_FIFO_PORT_DATA_SIZE);
3720 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
3721 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
3722 }
3723
3724 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
3725 MVPP2_RX_FIFO_PORT_MIN_PKT);
3726 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
3727}
3728
3729/* Initialize network controller common part HW */
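/*
 * Common init order: sanity-check the queue counts, configure the MBUS
 * windows, stop HW PHY polling, allocate the per-CPU aggregated TXQs,
 * initialize the Rx FIFOs and interrupt groups, then bring up the buffer
 * manager, parser and classifier.
 */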
3730static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
3731{
3732 const struct mbus_dram_target_info *dram_target_info;
3733 int err, i;
3734 u32 val;
3735
3736 /* Checks for hardware constraints (U-Boot uses only one rxq) */
3737 if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) {
3738 dev_err(&pdev->dev, "invalid queue size parameter\n");
3739 return -EINVAL;
3740 }
3741
3742 /* MBUS windows configuration */
3743 dram_target_info = mvebu_mbus_dram_info();
3744 if (dram_target_info)
3745 mvpp2_conf_mbus_windows(dram_target_info, priv);
3746
3747 /* Disable HW PHY polling */
3748 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3749 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
3750 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
3751
3752 /* Allocate and initialize aggregated TXQs */
3753 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
3754 sizeof(struct mvpp2_tx_queue),
3755 GFP_KERNEL);
3756 if (!priv->aggr_txqs)
3757 return -ENOMEM;
3758
3759 for_each_present_cpu(i) {
3760 priv->aggr_txqs[i].id = i;
3761 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
3762 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
3763 MVPP2_AGGR_TXQ_SIZE, i, priv);
3764 if (err < 0)
3765 return err;
3766 }
3767
3768 /* Rx Fifo Init */
3769 mvpp2_rx_fifo_init(priv);
3770
3771 /* Reset Rx queue group interrupt configuration */
3772 for (i = 0; i < MVPP2_MAX_PORTS; i++)
3773 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i),
3774 CONFIG_MV_ETH_RXQ);
3775
3776 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
3777 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
3778
3779	/* Allow cache snoop when transmitting packets */
3780 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
3781
3782 /* Buffer Manager initialization */
3783 err = mvpp2_bm_init(dev, priv);
3784 if (err < 0)
3785 return err;
3786
3787 /* Parser default initialization */
3788 err = mvpp2_prs_default_init(dev, priv);
3789 if (err < 0)
3790 return err;
3791
3792 /* Classifier default initialization */
3793 mvpp2_cls_init(priv);
3794
3795 return 0;
3796}
3797
3798/* SMI / MDIO functions */
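/*
 * The SMI accessors below share a single register: poll MVPP2_SMI until the
 * BUSY bit clears, write the PHY address, register offset and opcode, and for
 * reads poll again until READ_VALID is set before extracting the 16-bit data.
 */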
3799
3800static int smi_wait_ready(struct mvpp2 *priv)
3801{
3802 u32 timeout = MVPP2_SMI_TIMEOUT;
3803 u32 smi_reg;
3804
3805 /* wait till the SMI is not busy */
3806 do {
3807 /* read smi register */
3808 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3809 if (timeout-- == 0) {
3810 printf("Error: SMI busy timeout\n");
3811 return -EFAULT;
3812 }
3813 } while (smi_reg & MVPP2_SMI_BUSY);
3814
3815 return 0;
3816}
3817
3818/*
3819 * mpp2_mdio_read - miiphy_read callback function.
3820 *
3821 * Returns the 16-bit phy register value, or a negative error code on failure
3822 */
3823static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
3824{
3825 struct mvpp2 *priv = bus->priv;
3826 u32 smi_reg;
3827 u32 timeout;
3828
3829 /* check parameters */
3830 if (addr > MVPP2_PHY_ADDR_MASK) {
3831 printf("Error: Invalid PHY address %d\n", addr);
3832 return -EFAULT;
3833 }
3834
3835 if (reg > MVPP2_PHY_REG_MASK) {
3836 printf("Err: Invalid register offset %d\n", reg);
3837 return -EFAULT;
3838 }
3839
3840 /* wait till the SMI is not busy */
3841 if (smi_wait_ready(priv) < 0)
3842 return -EFAULT;
3843
3844	/* fill the phy address and register offset and read opcode */
3845 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3846 | (reg << MVPP2_SMI_REG_ADDR_OFFS)
3847 | MVPP2_SMI_OPCODE_READ;
3848
3849 /* write the smi register */
3850 writel(smi_reg, priv->lms_base + MVPP2_SMI);
3851
3852 /* wait till read value is ready */
3853 timeout = MVPP2_SMI_TIMEOUT;
3854
3855 do {
3856 /* read smi register */
3857 smi_reg = readl(priv->lms_base + MVPP2_SMI);
3858 if (timeout-- == 0) {
3859 printf("Err: SMI read ready timeout\n");
3860 return -EFAULT;
3861 }
3862 } while (!(smi_reg & MVPP2_SMI_READ_VALID));
3863
3864 /* Wait for the data to update in the SMI register */
3865 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
3866 ;
3867
3868 return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK;
3869}
3870
3871/*
3872 * mpp2_mdio_write - miiphy_write callback function.
3873 *
3874 * Returns 0 if the write succeeded, or -EFAULT on bad parameters
3875 * or on SMI timeout
3876 */
3877static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
3878 u16 value)
3879{
3880 struct mvpp2 *priv = bus->priv;
3881 u32 smi_reg;
3882
3883 /* check parameters */
3884 if (addr > MVPP2_PHY_ADDR_MASK) {
3885 printf("Error: Invalid PHY address %d\n", addr);
3886 return -EFAULT;
3887 }
3888
3889 if (reg > MVPP2_PHY_REG_MASK) {
3890 printf("Err: Invalid register offset %d\n", reg);
3891 return -EFAULT;
3892 }
3893
3894 /* wait till the SMI is not busy */
3895 if (smi_wait_ready(priv) < 0)
3896 return -EFAULT;
3897
3898 /* fill the phy addr and reg offset and write opcode and data */
3899 smi_reg = value << MVPP2_SMI_DATA_OFFS;
3900 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
3901 | (reg << MVPP2_SMI_REG_ADDR_OFFS);
3902 smi_reg &= ~MVPP2_SMI_OPCODE_READ;
3903
3904 /* write the smi register */
3905 writel(smi_reg, priv->lms_base + MVPP2_SMI);
3906
3907 return 0;
3908}
3909
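/*
 * Receive path: read the per-port ISR cause register, pick the Rx queue and
 * fetch one descriptor. Errored frames are returned to the BM pool; good
 * frames refill the pool with the same buffer and the payload (past the
 * 2-byte Marvell header and 32 bytes of headroom) is handed to the caller.
 */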
3910static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
3911{
3912 struct mvpp2_port *port = dev_get_priv(dev);
3913 struct mvpp2_rx_desc *rx_desc;
3914 struct mvpp2_bm_pool *bm_pool;
Thomas Petazzoni4dae32e2017-02-20 10:27:51 +01003915 dma_addr_t dma_addr;
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003916 u32 bm, rx_status;
3917 int pool, rx_bytes, err;
3918 int rx_received;
3919 struct mvpp2_rx_queue *rxq;
3920 u32 cause_rx_tx, cause_rx, cause_misc;
3921 u8 *data;
3922
3923 cause_rx_tx = mvpp2_read(port->priv,
3924 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
3925 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3926 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3927 if (!cause_rx_tx && !cause_misc)
3928 return 0;
3929
3930 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
3931
3932 /* Process RX packets */
3933 cause_rx |= port->pending_cause_rx;
3934 rxq = mvpp2_get_rx_queue(port, cause_rx);
3935
3936	/* Get the number of received packets */
3937 rx_received = mvpp2_rxq_received(port, rxq->id);
3938
3939 /* Return if no packets are received */
3940 if (!rx_received)
3941 return 0;
3942
3943 rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzonicfa414a2017-02-15 15:35:00 +01003944 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3945 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3946 rx_bytes -= MVPP2_MH_SIZE;
3947 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003948
Thomas Petazzonicfa414a2017-02-15 15:35:00 +01003949 bm = mvpp2_bm_cookie_build(port, rx_desc);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003950 pool = mvpp2_bm_cookie_pool_get(bm);
3951 bm_pool = &port->priv->bm_pools[pool];
3952
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003953 /* In case of an error, release the requested buffer pointer
3954 * to the Buffer Manager. This request process is controlled
3955 * by the hardware, and the information about the buffer is
3956	 * contained in the RX descriptor.
3957 */
3958 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
3959 mvpp2_rx_error(port, rx_desc);
3960 /* Return the buffer to the pool */
Thomas Petazzonicfa414a2017-02-15 15:35:00 +01003961 mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003962 return 0;
3963 }
3964
Thomas Petazzoni4dae32e2017-02-20 10:27:51 +01003965 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003966 if (err) {
3967 netdev_err(port->dev, "failed to refill BM pools\n");
3968 return 0;
3969 }
3970
3971 /* Update Rx queue management counters */
3972 mb();
3973 mvpp2_rxq_status_update(port, rxq->id, 1, 1);
3974
3975	/* Give the packet to the stack - skip the first 2 + 32 bytes (HW header + extra headroom) */
Thomas Petazzoni4dae32e2017-02-20 10:27:51 +01003976 data = (u8 *)dma_addr + 2 + 32;
Stefan Roese99d4c6d2016-02-10 07:22:10 +01003977
3978 if (rx_bytes <= 0)
3979 return 0;
3980
3981 /*
3982	 * No cache invalidation is needed here, since the rx_buffers are
3983	 * located in an uncached memory region
3984 */
3985 *packetp = data;
3986
3987 return rx_bytes;
3988}
3989
3990/* Drain Txq */
3991static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3992 int enable)
3993{
3994 u32 val;
3995
3996 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
3997 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
3998 if (enable)
3999 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4000 else
4001 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4002 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4003}
4004
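/*
 * Transmit path: take one descriptor from the aggregated TXQ, point it at
 * the packet (aligned base address plus offset), flush the data cache, kick
 * the queue, then poll until the descriptor reaches the physical TXQ and the
 * frame is reported as sent, toggling TXQ drain around the final wait.
 */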
4005static int mvpp2_send(struct udevice *dev, void *packet, int length)
4006{
4007 struct mvpp2_port *port = dev_get_priv(dev);
4008 struct mvpp2_tx_queue *txq, *aggr_txq;
4009 struct mvpp2_tx_desc *tx_desc;
4010 int tx_done;
4011 int timeout;
4012
4013 txq = port->txqs[0];
4014 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
4015
4016 /* Get a descriptor for the first part of the packet */
4017 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzonicfa414a2017-02-15 15:35:00 +01004018 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4019 mvpp2_txdesc_size_set(port, tx_desc, length);
4020 mvpp2_txdesc_offset_set(port, tx_desc,
4021 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
4022 mvpp2_txdesc_dma_addr_set(port, tx_desc,
4023 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004024 /* First and Last descriptor */
Thomas Petazzonicfa414a2017-02-15 15:35:00 +01004025 mvpp2_txdesc_cmd_set(port, tx_desc,
4026 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
4027 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004028
4029 /* Flush tx data */
Stefan Roesef811e042017-02-16 13:58:37 +01004030 flush_dcache_range((unsigned long)packet,
4031 (unsigned long)packet + ALIGN(length, PKTALIGN));
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004032
4033 /* Enable transmit */
4034 mb();
4035 mvpp2_aggr_txq_pend_desc_add(port, 1);
4036
4037 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4038
4039 timeout = 0;
4040 do {
4041 if (timeout++ > 10000) {
4042 printf("timeout: packet not sent from aggregated to phys TXQ\n");
4043 return 0;
4044 }
4045 tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
4046 } while (tx_done);
4047
4048 /* Enable TXQ drain */
4049 mvpp2_txq_drain(port, txq, 1);
4050
4051 timeout = 0;
4052 do {
4053 if (timeout++ > 10000) {
4054 printf("timeout: packet not sent\n");
4055 return 0;
4056 }
4057 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4058 } while (!tx_done);
4059
4060 /* Disable TXQ drain */
4061 mvpp2_txq_drain(port, txq, 0);
4062
4063 return 0;
4064}
4065
4066static int mvpp2_start(struct udevice *dev)
4067{
4068 struct eth_pdata *pdata = dev_get_platdata(dev);
4069 struct mvpp2_port *port = dev_get_priv(dev);
4070
4071 /* Load current MAC address */
4072 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);
4073
4074	/* Reconfigure the parser to accept the port's MAC address */
4075 mvpp2_prs_update_mac_da(port, port->dev_addr);
4076
4077 mvpp2_port_power_up(port);
4078
4079 mvpp2_open(dev, port);
4080
4081 return 0;
4082}
4083
4084static void mvpp2_stop(struct udevice *dev)
4085{
4086 struct mvpp2_port *port = dev_get_priv(dev);
4087
4088 mvpp2_stop_dev(port);
4089 mvpp2_cleanup_rxqs(port);
4090 mvpp2_cleanup_txqs(port);
4091}
4092
4093static int mvpp2_probe(struct udevice *dev)
4094{
4095 struct mvpp2_port *port = dev_get_priv(dev);
4096 struct mvpp2 *priv = dev_get_priv(dev->parent);
4097 int err;
4098
4099 /* Initialize network controller */
4100 err = mvpp2_init(dev, priv);
4101 if (err < 0) {
4102 dev_err(&pdev->dev, "failed to initialize controller\n");
4103 return err;
4104 }
4105
Simon Glasse160f7d2017-01-17 16:52:55 -07004106 return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv,
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004107 &buffer_loc.first_rxq);
4108}
4109
4110static const struct eth_ops mvpp2_ops = {
4111 .start = mvpp2_start,
4112 .send = mvpp2_send,
4113 .recv = mvpp2_recv,
4114 .stop = mvpp2_stop,
4115};
4116
4117static struct driver mvpp2_driver = {
4118 .name = "mvpp2",
4119 .id = UCLASS_ETH,
4120 .probe = mvpp2_probe,
4121 .ops = &mvpp2_ops,
4122 .priv_auto_alloc_size = sizeof(struct mvpp2_port),
4123 .platdata_auto_alloc_size = sizeof(struct eth_pdata),
4124};
4125
4126/*
4127 * Use a MISC device to bind the N port instances (child nodes) of the
4128 * network base controller as UCLASS_ETH devices.
4129 */
4130static int mvpp2_base_probe(struct udevice *dev)
4131{
4132 struct mvpp2 *priv = dev_get_priv(dev);
4133 struct mii_dev *bus;
4134 void *bd_space;
4135 u32 size = 0;
4136 int i;
4137
Thomas Petazzoni16a98982017-02-15 14:08:59 +01004138 /* Save hw-version */
4139 priv->hw_version = dev_get_driver_data(dev);
4140
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004141 /*
4142 * U-Boot special buffer handling:
4143 *
4144	 * Allocate buffer area for descs and rx_buffers. This is only
4145	 * done once for all interfaces, as only one interface can be
4146	 * active at a time. Make this area DMA-safe by disabling the D-cache.
4147 */
4148
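/*
 * Rough layout of the single bd_space allocation (all uncached), in order:
 * aggregated Tx descriptors, Tx descriptors, Rx descriptors, one array per
 * BM pool, then MVPP2_BM_LONG_BUF_NUM receive buffers.
 */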
4149 /* Align buffer area for descs and rx_buffers to 1MiB */
4150 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
Stefan Roesea7c28ff2017-02-15 12:46:18 +01004151 mmu_set_region_dcache_behaviour((unsigned long)bd_space,
4152 BD_SPACE, DCACHE_OFF);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004153
4154 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
4155 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;
4156
Stefan Roesea7c28ff2017-02-15 12:46:18 +01004157 buffer_loc.tx_descs =
4158 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004159 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;
4160
Stefan Roesea7c28ff2017-02-15 12:46:18 +01004161 buffer_loc.rx_descs =
4162 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004163 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;
4164
4165 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
Stefan Roesea7c28ff2017-02-15 12:46:18 +01004166 buffer_loc.bm_pool[i] =
4167 (unsigned long *)((unsigned long)bd_space + size);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004168 size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32);
4169 }
4170
4171 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
Stefan Roesea7c28ff2017-02-15 12:46:18 +01004172 buffer_loc.rx_buffer[i] =
4173 (unsigned long *)((unsigned long)bd_space + size);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004174 size += RX_BUFFER_SIZE;
4175 }
4176
4177 /* Save base addresses for later use */
4178 priv->base = (void *)dev_get_addr_index(dev, 0);
4179 if (IS_ERR(priv->base))
4180 return PTR_ERR(priv->base);
4181
4182 priv->lms_base = (void *)dev_get_addr_index(dev, 1);
4183 if (IS_ERR(priv->lms_base))
4184 return PTR_ERR(priv->lms_base);
4185
4186 /* Finally create and register the MDIO bus driver */
4187 bus = mdio_alloc();
4188 if (!bus) {
4189 printf("Failed to allocate MDIO bus\n");
4190 return -ENOMEM;
4191 }
4192
4193 bus->read = mpp2_mdio_read;
4194 bus->write = mpp2_mdio_write;
4195 snprintf(bus->name, sizeof(bus->name), dev->name);
4196 bus->priv = (void *)priv;
4197 priv->bus = bus;
4198
4199 return mdio_register(bus);
4200}
4201
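/*
 * Bind one UCLASS_ETH child device (named "mvpp2-<port-id>") for every
 * enabled port subnode of the base controller node.
 */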
4202static int mvpp2_base_bind(struct udevice *parent)
4203{
4204 const void *blob = gd->fdt_blob;
Simon Glasse160f7d2017-01-17 16:52:55 -07004205 int node = dev_of_offset(parent);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004206 struct uclass_driver *drv;
4207 struct udevice *dev;
4208 struct eth_pdata *plat;
4209 char *name;
4210 int subnode;
4211 u32 id;
4212
4213 /* Lookup eth driver */
4214 drv = lists_uclass_lookup(UCLASS_ETH);
4215 if (!drv) {
4216 puts("Cannot find eth driver\n");
4217 return -ENOENT;
4218 }
4219
Simon Glassdf87e6b2016-10-02 17:59:29 -06004220 fdt_for_each_subnode(subnode, blob, node) {
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004221 /* Skip disabled ports */
4222 if (!fdtdec_get_is_enabled(blob, subnode))
4223 continue;
4224
4225 plat = calloc(1, sizeof(*plat));
4226 if (!plat)
4227 return -ENOMEM;
4228
4229 id = fdtdec_get_int(blob, subnode, "port-id", -1);
4230
4231 name = calloc(1, 16);
4232 sprintf(name, "mvpp2-%d", id);
4233
4234 /* Create child device UCLASS_ETH and bind it */
4235 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
Simon Glasse160f7d2017-01-17 16:52:55 -07004236 dev_set_of_offset(dev, subnode);
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004237 }
4238
4239 return 0;
4240}
4241
4242static const struct udevice_id mvpp2_ids[] = {
Thomas Petazzoni16a98982017-02-15 14:08:59 +01004243 {
4244 .compatible = "marvell,armada-375-pp2",
4245 .data = MVPP21,
4246 },
Stefan Roese99d4c6d2016-02-10 07:22:10 +01004247 { }
4248};
4249
4250U_BOOT_DRIVER(mvpp2_base) = {
4251 .name = "mvpp2_base",
4252 .id = UCLASS_MISC,
4253 .of_match = mvpp2_ids,
4254 .bind = mvpp2_base_bind,
4255 .probe = mvpp2_base_probe,
4256 .priv_auto_alloc_size = sizeof(struct mvpp2),
4257};