/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
#include <linux/mdio.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr) \
do { \
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
	(void)__vpp_verify; \
} while (0)

#define VERIFY_PERCPU_PTR(__p) \
({ \
	__verify_pcpu_ptr(__p); \
	(typeof(*(__p)) __kernel __force *)(__p); \
})

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id() 0
#define num_present_cpus() 1
#define for_each_present_cpu(cpu) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++)
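
/*
 * Note: U-Boot runs this driver on a single CPU, so the Linux per-CPU
 * constructs above collapse to plain accesses: per_cpu_ptr(p, cpu)
 * simply returns p, and for_each_present_cpu() iterates exactly once
 * with cpu == 0.
 */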

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS	1

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP		(2 + ETH_HLEN + 4 + 32)
#define MTU		1500
#define RX_BUFFER_SIZE	(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
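
/*
 * Worked example: WRAP = 2 + 14 + 4 + 32 = 52 bytes, so each RX buffer
 * spans ALIGN(1500 + 52, ARCH_DMA_MINALIGN) bytes -- e.g. 1600 bytes on
 * a platform with a 64-byte ARCH_DMA_MINALIGN.
 */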

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + p)
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + p)
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
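
/* e.g. with a 16-entry ring, MVPP2_QUEUE_NEXT_DESC(q, 15) wraps back to 0 */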

/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4
/* TX FIFO minimum threshold values per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, the IP header ends up aligned on a 4-byte boundary
 * automatically: the hardware skips the two bytes on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
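
/*
 * Worked example: MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 * = 1536 bytes, and MVPP2_RX_BUF_SIZE() then adds NET_SKB_PAD (32 bytes of
 * headroom) on top of that.
 */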

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
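
/*
 * Byte-swizzling example: each 32-bit TCAM word holds two data bytes in its
 * low half and their two enable (mask) bytes in its high half, so
 * MVPP2_PRS_TCAM_DATA_BYTE() maps header offsets 0,1,2,3,... to union bytes
 * 0,1,4,5,... while MVPP2_PRS_TCAM_DATA_BYTE_EN() maps them to 2,3,6,7,...
 */
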
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
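
/*
 * Sanity check of the 512-byte claim above: NET_SKB_PAD is 32 and
 * MVPP2_SKB_SHINFO_SIZE is 0 here, so MVPP2_RX_MAX_PKT_SIZE(512) = 480,
 * and adding the 32 bytes of headroom back yields exactly 512 bytes.
 */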

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
	struct udevice *mdio_dev;
	struct mii_dev *bus;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer buffer descriptors, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
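
/*
 * Rough budget, assuming the defaults above: three 16-entry descriptor
 * rings of 32-byte descriptors (~1.5 KiB) plus 16 RX buffers of
 * RX_BUFFER_SIZE (~25 KiB) fit comfortably within the 1 MiB BD_SPACE.
 */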

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
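
/*
 * e.g. with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, egress port 0 is
 * physical port 16 and its TXQ 0 is physical TXQ 128: the first 16 slots
 * are reserved for the PON T-CONTs.
 */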

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}
1485
1486/* Update port map in tcam sw entry */
1487static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1488 unsigned int ports)
1489{
1490 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1491 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1492
1493 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1494 pe->tcam.byte[enable_off] &= ~port_mask;
1495 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1496}
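
/*
 * Illustrative example: the port map is stored inverted in the TCAM
 * enable byte - a cleared enable bit means "this port matches".
 * Assuming MVPP2_PRS_PORT_MASK covers all eight port bits, selecting
 * ports 0 and 1 (ports == 0x03) programs the enable byte to
 * ~0x03 & 0xff == 0xfc; mvpp2_prs_tcam_port_map_get() below undoes
 * the inversion when reading the map back.
 */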

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
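
/*
 * Illustrative example: MVPP2_PRS_SRAM_AI_OFFS is not byte aligned,
 * so the eight AI bits straddle two bytes of the sw entry and the
 * read above stitches them back together. Assuming the offset of 90
 * defined earlier in this driver, ai_off is byte 11 and ai_shift is
 * 2: bits [7:2] of byte 11 and bits [1:0] of byte 12 form the value.
 */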

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
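
/*
 * Encoding note: the shift is stored as sign-magnitude rather than
 * two's complement - a sign bit plus an unsigned byte. E.g. a shift
 * of -14 sets MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and stores 14 in the
 * value byte; mvpp2_prs_sram_offset_set() below encodes its
 * user-defined offset the same way.
 */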

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}
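
/*
 * Ownership note: on a hit, mvpp2_prs_flow_find() hands the
 * kzalloc()ed entry to the caller, which must kfree() it when done -
 * see mvpp2_prs_def_flow() below. On a miss the entry is freed here
 * and NULL is returned, so callers only ever free what they received.
 */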

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
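
/*
 * Background note: matching only the first DA byte is sufficient to
 * tell the two multicast families apart, since IPv4 multicast frames
 * use MAC addresses of the form 01:00:5e:xx:xx:xx while IPv6
 * multicast frames use 33:33:xx:xx:xx:xx; anything else falls through
 * to the unicast and drop entries configured above.
 */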

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Placeholders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
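
/*
 * Matching note: an entry is considered equal only if it was
 * programmed with the same per-byte enable mask and its data bytes
 * agree with the candidate DA wherever that mask has bits set; a mask
 * byte of 0x00 therefore makes the corresponding data byte a wildcard
 * on both sides of the comparison.
 */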

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if an entry with this <MAC DA, port> exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists - create a new one */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
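
/*
 * Layout note: the oversize RX queue number is split across two
 * registers - the low MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS bits land in
 * the per-port OVERSIZE_RXQ_LOW register and the remaining high bits
 * in the SWFWD_P2HQ register, so both writes above are needed when
 * first_rxq does not fit in the low field.
 */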

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	if (priv->hw_version == MVPP22)
		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
			    (upper_32_bits(bm_pool->dma_addr) &
			     MVPP22_BM_POOL_BASE_HIGH_MASK));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
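
/*
 * Granularity note: the hardware tracks pool buffer sizes in units of
 * 1 << MVPP2_POOL_BUF_SIZE_OFFSET bytes, so the value programmed
 * above is buf_size rounded up to that granularity - e.g. assuming
 * 32-byte units, a 1518-byte buffer is written as 1536.
 */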

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Allocate buffer back from the buffer manager */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}

static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
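
/*
 * Illustrative example: the pool number occupies eight bits starting
 * at MVPP2_BM_COOKIE_POOL_OFFS, so the two helpers above are exact
 * inverses of each other:
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 2);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	// pool == 2
 */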

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}

/* Notify the driver that BM pool is being used as a specific type and return
 * the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL, port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
				   new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}

/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
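
/*
 * Note (based on the PPv2 GMAC register layout, not spelled out in
 * this file): the MAX_RX_SIZE field counts 2-byte units, which is why
 * the packet size, less the Marvell header, is divided by 2 before
 * being written above.
 */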

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure the port to work with the Gig PCS or not.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to RGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		return -1;
	}

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}
3172
3173static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
3174{
3175 u32 val;
3176
3177 /* relevant only for MAC0 (XLG0 and GMAC0) */
3178 if (port->gop_id > 0)
3179 return;
3180
3181 /* configure 1Gig MAC mode */
3182 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3183 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3184 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3185 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3186}
3187
3188static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
3189{
3190 u32 val;
3191
3192 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3193 if (reset)
3194 val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
3195 else
3196 val |= MVPP2_GMAC_SGMII_MODE_MASK;
3197 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3198
3199 return 0;
3200}
3201
Stefan Roese2fe23042017-03-22 15:09:38 +01003202/* Set the internal mux's to the required PCS in the PI */
3203static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
3204{
3205 u32 val;
3206 int lane;
3207
3208 switch (num_of_lanes) {
3209 case 1:
3210 lane = 0;
3211 break;
3212 case 2:
3213 lane = 1;
3214 break;
3215 case 4:
3216 lane = 2;
3217 break;
3218 default:
3219 return -1;
3220 }
	/* configure XG PCS mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);
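
	/*
	 * Note: val carries over from the write above (it is not re-read),
	 * so the final write below keeps the new clock division ratio while
	 * de-asserting the MAC and RX/TX SD clock resets.
	 */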
	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}

/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
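		/* The XLG MAC used for SFI is only present on MAC0 */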
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}

static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);
		break;

	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}

static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select XMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);
		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
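		/* i.e. the 64-byte minimal frame, less the 4-byte CRC and,
		 * presumably, the 2-byte Marvell HW header
		 */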
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW stops taking descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
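	/*
	 * Writing that same bitmap, shifted into the disable field of the
	 * command register, requests the active queues to stop.
	 */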
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

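	/* Pack the pool id and the cpu id into separate byte fields */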
	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (sent) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

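	/* The TXP MTU register appears to count in 1/8-byte token units,
	 * hence the multiplication by 8 below (an assumption; not
	 * documented here).
	 */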
	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

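	/* No skbs are attached in U-Boot, so freeing simply advances the
	 * per-CPU get index past the transmitted descriptors.
	 */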
	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			  MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

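	/* On PPv2.2 only the high-order address bits are programmed, so the
	 * descriptor base must be aligned such that the dropped low bits
	 * are zero.
	 */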
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be contiguous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be contiguous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* Enqueuing has been stopped, so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
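		/* fall through */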
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}

static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
					      dev, port->phy_interface);

		/*
		 * If the phy doesn't match any existing U-Boot driver, the
		 * phy framework will connect it to the generic one whose
		 * uid == 0xffffffff. In this case act as if the phy wasn't
		 * declared in the dts. Otherwise, for the 3310 (for which
		 * no driver exists) the link would not be correctly
		 * detected. Removing the phy entry from the dts in case of
		 * the 3310 is not an option because it is required for the
		 * phy_fw_down procedure.
		 */
		if (phy_dev &&
		    phy_dev->drv->uid == 0xffffffff) {/* Generic phy */
			netdev_warn(port->dev,
				    "Marking phy as invalid, link will not be checked\n");
			/* set phy_addr to invalid value */
			port->phyaddr = PHY_MAX_ADDR;
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);

			return;
		}

		port->phy_dev = phy_dev;
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->phy_dev = phy_dev;
		port->link    = 0;
		port->duplex  = 0;
		port->speed   = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link)
			printf("%s: No link\n", phy_dev->dev->name);
		else
			port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}
}

static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phyaddr < PHY_MAX_ADDR) {
		mvpp2_phy_connect(dev, port);
		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}

static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node;
	u32 id;
	int phyaddr = 0;
	int phy_mode = -1;
	int ret;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		int parent;

		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
		if (phyaddr < 0) {
			dev_err(&pdev->dev, "could not find phy address\n");
			return -1;
		}
		parent = fdt_parent_offset(gd->fdt_blob, phy_node);
		ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
						     &port->mdio_dev);
		if (ret)
			return ret;
	} else {
		/* phy_addr is set to invalid value */
		phyaddr = PHY_MAX_ADDR;
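		/*
		 * Ports without a phy node run PHY-less: mvpp2_open() tests
		 * phyaddr against PHY_MAX_ADDR and then simply enables
		 * ingress/egress without any link check.
		 */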
	}

	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
	if (phy_mode_str)
		phy_mode = phy_get_interface_by_name(phy_mode_str);
	if (phy_mode == -1) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(&pdev->dev, "missing port-id value\n");
		return -EINVAL;
	}

#if CONFIG_IS_ENABLED(DM_GPIO)
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &port->phy_reset_gpio, GPIOD_IS_OUT);
	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
#endif

	/*
	 * ToDo:
	 * Not sure if this DT property "phy-speed" will get accepted, so
	 * this might change later
	 */
	/* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */
	port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node,
					 "phy-speed", 1000);

	port->id = id;
	if (port->priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * port->priv->max_port_rxqs;
	port->phy_interface = phy_mode;
	port->phyaddr = phyaddr;

	return 0;
}

#if CONFIG_IS_ENABLED(DM_GPIO)
/* Port GPIO initialization */
static void mvpp2_gpio_init(struct mvpp2_port *port)
{
	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
		dm_gpio_set_value(&port->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&port->phy_reset_gpio, 0);
	}

	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
}
#endif

/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#if CONFIG_IS_ENABLED(DM_GPIO)
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFO's */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

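	/* Clear the high 32 bits of the BM buffer release address; U-Boot
	 * DMA buffers are assumed to reside below 4 GB.
	 */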
	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		mvpp2_axi_init(priv);
	} else {
		/* MBUS windows configuration */
		dram_target_info = mvebu_mbus_dram_info();
		if (dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
5044
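/*
 * U-Boot .recv callback: fetch at most one packet from RX queue 0
 * and hand its (uncached) buffer to the network stack.
 */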
static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	if (port->phyaddr < PHY_MAX_ADDR && !port->phy_dev->link)
		return 0;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get the number of packets received since the last call */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * contained in the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Give the packet to the stack, skipping the buffer headroom
	 * (2-byte Marvell header plus 32 bytes of padding)
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx_buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

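/*
 * U-Boot .send callback: queue a single packet on TX queue 0 via the
 * per-CPU aggregated TXQ and busy-wait until the hardware reports it
 * as sent.
 */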
static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	if (port->phyaddr < PHY_MAX_ADDR && !port->phy_dev->link)
		return 0;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	/* Wait until the pending descriptor count of the physical TXQ
	 * drops to zero
	 */
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	/* Wait until the hardware reports the descriptor as sent */
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}

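/*
 * U-Boot .start callback: program the MAC address into the parser,
 * power up the port and open the device.
 */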
static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
		break;
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

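/* U-Boot .stop callback: halt the port and tear down its RX/TX queues */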
static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

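/* U-Boot .write_hwaddr callback: propagate the MAC address to the parser */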
static int mvpp2_write_hwaddr(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	return mvpp2_prs_update_mac_da(port, port->dev_addr);
}

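/* Tell the SMI unit which PHY address to poll for this port (PPv2.2 only) */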
static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}

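/*
 * Probe the common (MISC) base device: set aside one uncached buffer
 * area shared by all ports for descriptors, BM pool rings and RX
 * buffers, and record the controller register base addresses from
 * the device tree.
 */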
static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descriptors and rx_buffers. This is
	 * only done once for all interfaces, since only one interface can
	 * be active at a time. Make this area DMA-safe by disabling the
	 * D-cache for it.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	return 0;
}

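/*
 * Per-port probe: parse PHY information, map the per-port registers
 * and, on the first invocation, probe the parent and initialize the
 * common controller parts.
 */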
static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = priv;

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phyaddr < PHY_MAX_ADDR)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty the BM pools and stop their activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start = mvpp2_start,
	.send = mvpp2_send,
	.recv = mvpp2_recv,
	.stop = mvpp2_stop,
	.write_hwaddr = mvpp2_write_hwaddr,
};

static struct driver mvpp2_driver = {
	.name = "mvpp2",
	.id = UCLASS_ETH,
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.ops = &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the N port instances (child nodes) of the
 * network base controller as UCLASS_ETH devices.
 */
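/*
 * An illustrative sketch of the device tree layout this bind routine
 * walks (node and label names are made up for the example; only the
 * "port-id", "gop-port-id" and "status" properties are actually
 * parsed by this driver):
 *
 *	ethernet@0 {
 *		compatible = "marvell,armada-7k-pp22";
 *		eth0: port0 {
 *			port-id = <0>;
 *			gop-port-id = <0>;
 *		};
 *		eth1: port1 {
 *			port-id = <1>;
 *			gop-port-id = <2>;
 *			status = "disabled";
 *		};
 *	};
 */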
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		if (!name) {
			free(plat);
			return -ENOMEM;
		}
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name = "mvpp2_base",
	.id = UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind = mvpp2_base_bind,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};