/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
#include <linux/mdio.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr) \
do { \
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
	(void)__vpp_verify; \
} while (0)

#define VERIFY_PERCPU_PTR(__p) \
({ \
	__verify_pcpu_ptr(__p); \
	(typeof(*(__p)) __kernel __force *)(__p); \
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

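/*
 * Usage sketch (single-CPU U-Boot case, txq being any hypothetical
 * struct mvpp2_tx_queue *): the per-CPU helpers above collapse to
 * plain pointer accesses, e.g.
 *
 *	int cpu;
 *	struct mvpp2_txq_pcpu *txq_pcpu;
 *
 *	for_each_present_cpu(cpu)
 *		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 *
 * runs the loop body exactly once with cpu == 0.
 */
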
#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS	1

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP		(2 + ETH_HLEN + 4 + 32)
#define MTU		1500
#define RX_BUFFER_SIZE	(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

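/*
 * Worked example (the exact value is platform dependent): with
 * MTU = 1500 and WRAP = 2 + 14 + 4 + 32 = 52, MTU + WRAP = 1552.
 * Assuming ARCH_DMA_MINALIGN = 64, ALIGN() rounds up to the next
 * multiple of 64, giving RX_BUFFER_SIZE = 1600.
 */
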
/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG	0x60
#define MVPP2_RX_FIFO_INIT_REG	0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)	(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)	(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET	5
#define MVPP2_RXQ_CONFIG_REG(rxq)	(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK	0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK	BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS	20
#define MVPP21_RXQ_POOL_SHORT_MASK	0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK	0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS	24
#define MVPP21_RXQ_POOL_LONG_MASK	0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK	0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define MVPP2_RXQ_DISABLE_MASK	BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG	0x1000
#define MVPP2_PRS_PORT_LU_MAX	0xf
#define MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)	(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)	(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG	0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)	(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK	BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG	0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)	(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG	0x1230
#define MVPP2_PRS_TCAM_EN_MASK	BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG	0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK	BIT(0)
#define MVPP2_CLS_PORT_WAY_REG	0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG	0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG	0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK	0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG	0x1820
#define MVPP2_CLS_FLOW_TBL0_REG	0x1824
#define MVPP2_CLS_FLOW_TBL1_REG	0x1828
#define MVPP2_CLS_FLOW_TBL2_REG	0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)	(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG	0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG	0x2040
#define MVPP2_RXQ_DESC_ADDR_REG	0x2044
#define MVPP22_DESC_ADDR_OFFS	8
#define MVPP2_RXQ_DESC_SIZE_REG	0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define MVPP2_RXQ_NUM_NEW_OFFSET	16
#define MVPP2_RXQ_STATUS_REG(rxq)	(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK	0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define MVPP2_RXQ_NON_OCCUPIED_MASK	0x3fff0000
#define MVPP2_RXQ_THRESH_REG	0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET	0
#define MVPP2_OCCUPIED_THRESH_MASK	0x3fff
#define MVPP2_RXQ_INDEX_REG	0x2050
#define MVPP2_TXQ_NUM_REG	0x2080
#define MVPP2_TXQ_DESC_ADDR_REG	0x2084
#define MVPP2_TXQ_DESC_SIZE_REG	0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG	0x2090
#define MVPP2_TXQ_THRESH_REG	0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET	16
#define MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
#define MVPP2_TXQ_INDEX_REG	0x2098
#define MVPP2_TXQ_PREF_BUF_REG	0x209c
#define MVPP2_PREF_BUF_PTR(desc)	((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4	(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16	(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)	((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK	BIT(31)
#define MVPP2_TXQ_PENDING_REG	0x20a0
#define MVPP2_TXQ_PENDING_MASK	0x3fff
#define MVPP2_TXQ_INT_STATUS_REG	0x20a4
#define MVPP2_TXQ_SENT_REG(txq)	(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG	0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET	16
#define MVPP2_TXQ_RSVD_RSLT_REG	0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK	0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG	0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET	16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)	(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK	0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)	(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)	(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)	(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)	(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE	0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG	0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG	0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG	0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG	0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG	0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG	0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG	0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG	0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS	0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS	12

#define MVPP22_AXI_CODE_CACHE_OFFS	0
#define MVPP22_AXI_CODE_DOMAIN_OFFS	4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE	0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE	0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE	0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM	3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)	(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)	(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG	0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK	0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)	(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)	(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK	BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK	BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)	(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG	0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK	BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG	0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)	(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)	(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK	0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK	0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK	0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)	(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK	BIT(0)
#define MVPP2_BM_STOP_MASK	BIT(1)
#define MVPP2_BM_STATE_MASK	BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS	8
#define MVPP2_BM_LOW_THRESH_MASK	0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS	16
#define MVPP2_BM_HIGH_THRESH_MASK	0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)	(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK	BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK	BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK	BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)	(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)	(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG	0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC	0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK	0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)	(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK	BIT(2)
#define MVPP2_BM_VIRT_RLS_REG	0x64c0
#define MVPP21_BM_MC_RLS_REG	0x64c4
#define MVPP2_BM_MC_ID_MASK	0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK	BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG	0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG	0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG	0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK	0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG	0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG	0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK	0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET	8
#define MVPP2_TXP_SCHED_CMD_1_REG	0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG	0x8018
#define MVPP2_TXP_SCHED_MTU_REG	0x801c
#define MVPP2_TXP_MTU_MAX	0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG	0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG	0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX	0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)	(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX	0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX	0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG	0x8800
#define MVPP2_TX_PORT_FLUSH_REG	0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE	0x24
#define MVPP2_SRC_ADDR_HIGH	0x28
#define MVPP2_PHY_AN_CFG0_REG	0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK	BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG	0x0
#define MVPP2_GMAC_PORT_EN_MASK	BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK	BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
#define MVPP2_GMAC_CTRL_1_REG	0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK	BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT	6
#define MVPP2_GMAC_PCS_LB_EN_MASK	BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS	7
#define MVPP2_GMAC_CTRL_2_REG	0x8
#define MVPP2_GMAC_INBAND_AN_MASK	BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK	BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK	BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK	BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK	BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK	BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK	BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG	0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVPP2_GMAC_EN_PCS_AN	BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN	BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN	BIT(7)
#define MVPP2_GMAC_FC_ADV_EN	BIT(9)
#define MVPP2_GMAC_EN_FC_AN	BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG	0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG	0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG	0x100
#define MVPP22_XLG_PORT_EN	BIT(0)
#define MVPP22_XLG_MAC_RESETN	BIT(1)
#define MVPP22_XLG_RX_FC_EN	BIT(7)
#define MVPP22_XLG_MIBCNT_DIS	BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG	0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS	0
#define MVPP22_XLG_MAX_RX_SIZE_MASK	0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG	0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG	0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG	0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN	BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN	BIT(6)
#define MVPP22_XLG_MODE_DMA_1G	BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG	0x0
#define MVPP22_XPCS_PCSRESET	BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS	3
#define MVPP22_XPCS_PCSMODE_MASK	(0x3 << \
					 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS	5
#define MVPP22_XPCS_LANEACTIVE_MASK	(0x3 << \
					 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL	0x14
#define FORWARD_ERROR_CORRECTION_MASK	BIT(10)

#define PCS_CLOCK_RESET	0x14c
#define TX_SD_CLK_RESET_MASK	BIT(0)
#define RX_SD_CLK_RESET_MASK	BIT(1)
#define MAC_CLK_RESET_MASK	BIT(2)
#define CLK_DIVISION_RATIO_OFFS	4
#define CLK_DIVISION_RATIO_MASK	(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK	BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG	0x108
#define NETC_GOP_SOFT_RESET_OFFS	6
#define NETC_GOP_SOFT_RESET_MASK	(0x1 << \
					 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG	0x110
#define NETC_BUS_WIDTH_SELECT_OFFS	1
#define NETC_BUS_WIDTH_SELECT_MASK	(0x1 << \
					 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS	29
#define NETC_GIG_RX_DATA_SAMPLE_MASK	(0x1 << \
					 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS	31
#define NETC_CLK_DIV_PHASE_MASK	(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG	0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)	(0 + p)
#define NETC_PORTS_ACTIVE_MASK(p)	(0x1 << \
					 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)	(28 + p)
#define NETC_PORT_GIG_RF_RESET_MASK(p)	(0x1 << \
					 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG	0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS	0
#define NETC_GBE_PORT0_SGMII_MODE_MASK	(0x1 << \
					 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS	1
#define NETC_GBE_PORT1_SGMII_MODE_MASK	(0x1 << \
					 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS	2
#define NETC_GBE_PORT1_MII_MODE_MASK	(0x1 << \
					 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG	(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN	BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)	(MVPP22_SMI + 0x04 + \
					 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

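/*
 * Example: with a 16-entry descriptor ring (last_desc = 15),
 * MVPP2_QUEUE_NEXT_DESC(q, 14) is 15 and MVPP2_QUEUE_NEXT_DESC(q, 15)
 * wraps back to 0.
 */
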
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI	0x1200

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS	0x007000
#define MVPP22_XPCS	0x007400
#define MVPP22_PORT_BASE	0x007e00
#define MVPP22_PORT_OFFSET	0x001000
#define MVPP22_RFU1	0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM	4

/* GMAC TX FIFO minimum threshold values, per interface type */
#define MVPP2_RGMII_TX_FIFO_MIN_TH	0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH	0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH	0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS	32
#define MVPP2_RX_COAL_USEC	100

/* The two-byte Marvell header: it either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically with zeroes on
 * the RX side. Since these two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE	2
#define MVPP2_ETH_TYPE_LEN	2
#define MVPP2_PPPOE_HDR_SIZE	8
#define MVPP2_VLAN_TAG_LEN	4

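/*
 * Alignment example for the comment above: the Ethernet header is
 * ETH_HLEN = 14 bytes, so on its own the IP header would start at
 * offset 14. With the 2-byte Marvell header in front it starts at
 * 2 + 14 = 16, a 4-byte boundary.
 */
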
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE	0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE	9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX	0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT	16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS	4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ	8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ	1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ	1
#define CONFIG_MV_ETH_RXQ	8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD	16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD	16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK	16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE	16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE	32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN	(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE	0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK	0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB	0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB	0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

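/*
 * Worked example: for mtu = 1500, MVPP2_RX_PKT_SIZE() is
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = ALIGN(1524, 32) = 1536. With
 * NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0, MVPP2_RX_BUF_SIZE
 * and MVPP2_RX_TOTAL_SIZE then both come to 1536 + 32 = 1568.
 */
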
#define MVPP2_BIT_TO_BYTE(bit)	((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE	16

/* Port flags */
#define MVPP2_F_LOOPBACK	BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS	6
#define MVPP2_PRS_SRAM_WORDS	4
#define MVPP2_PRS_FLOW_ID_SIZE	64
#define MVPP2_PRS_FLOW_ID_MASK	0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD	0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC	0xe0
#define MVPP2_PRS_IPV4_MC_MASK	0xf0
#define MVPP2_PRS_IPV4_BC_MASK	0xff
#define MVPP2_PRS_IPV4_IHL	0x5
#define MVPP2_PRS_IPV4_IHL_MASK	0xf
#define MVPP2_PRS_IPV6_MC	0xff
#define MVPP2_PRS_IPV6_MC_MASK	0xff
#define MVPP2_PRS_IPV6_HOP_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX	100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS	8
#define MVPP2_PRS_PORT_MASK	0xff
#define MVPP2_PRS_LU_MASK	0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
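
/*
 * Worked example of the byte swizzling done by the two macros above:
 * each 32-bit TCAM word holds two data bytes followed by their two
 * enable (mask) bytes. For offs = 0..5, MVPP2_PRS_TCAM_DATA_BYTE()
 * yields 0, 1, 4, 5, 8, 9 and MVPP2_PRS_TCAM_DATA_BYTE_EN() yields
 * 2, 3, 6, 7, 10, 11, i.e. data in bytes 0-1 of each word and the
 * enable bits in bytes 2-3.
 */
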
#define MVPP2_PRS_TCAM_AI_BYTE	16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE	20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD	5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL	0
#define MVPP2_PE_FIRST_FREE_TID	1
#define MVPP2_PE_LAST_FREE_TID	(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6	(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL	(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE	(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL	(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS	0
#define MVPP2_PRS_SRAM_RI_WORD	0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS	32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD	1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS	32
#define MVPP2_PRS_SRAM_SHIFT_OFFS	64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT	72
#define MVPP2_PRS_SRAM_UDF_OFFS	73
#define MVPP2_PRS_SRAM_UDF_BITS	8
#define MVPP2_PRS_SRAM_UDF_MASK	0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT	81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS	82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK	0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3	1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4	4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD	1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS	87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD	0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS	89
#define MVPP2_PRS_SRAM_AI_OFFS	90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS	98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS	8
#define MVPP2_PRS_SRAM_AI_MASK	0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS	106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK	0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT	110
#define MVPP2_PRS_SRAM_LU_GEN_BIT	111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK	0x1
#define MVPP2_PRS_RI_DSA_MASK	0x2
#define MVPP2_PRS_RI_VLAN_MASK	(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE	0x0
#define MVPP2_PRS_RI_VLAN_SINGLE	BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE	BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE	(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK	0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC	BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK	(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST	0x0
#define MVPP2_PRS_RI_L2_MCAST	BIT(9)
#define MVPP2_PRS_RI_L2_BCAST	BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK	0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK	(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN	0x0
#define MVPP2_PRS_RI_L3_IP4	BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT	BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER	(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6	BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT	(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP	(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK	(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST	0x0
#define MVPP2_PRS_RI_L3_MCAST	BIT(15)
#define MVPP2_PRS_RI_L3_BCAST	(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK	0x20000
#define MVPP2_PRS_RI_UDF3_MASK	0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL	BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK	0x1c00000
#define MVPP2_PRS_RI_L4_TCP	BIT(22)
#define MVPP2_PRS_RI_L4_UDP	BIT(23)
#define MVPP2_PRS_RI_L4_OTHER	(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK	0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE	BIT(29)
#define MVPP2_PRS_RI_DROP_MASK	0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT	BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT	BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT	BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT	BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT	BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI	0
#define MVPP2_PRS_DBL_VLAN_AI_BIT	BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED	true
#define MVPP2_PRS_UNTAGGED	false
#define MVPP2_PRS_EDSA	true
#define MVPP2_PRS_DSA	false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE	64

/* BM constants */
#define MVPP2_BM_POOLS_NUM	1
#define MVPP2_BM_LONG_BUF_NUM	16
#define MVPP2_BM_SHORT_BUF_NUM	16
#define MVPP2_BM_POOL_SIZE_MAX	(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN	128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

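/*
 * Illustration (the composition used by the driver code, not a
 * hardware-defined format): a cookie for BM pool 2 built on CPU 0 is
 * (2 << MVPP2_BM_COOKIE_POOL_OFFS) | (0 << MVPP2_BM_COOKIE_CPU_OFFS)
 * = 0x200.
 */
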
/* BM short pool packet size
 * This value ensures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
	struct udevice *mdio_dev;
	struct mii_dev *bus;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;	/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT	0
#define MVPP2_TXD_IP_HLEN_SHIFT	8
#define MVPP2_TXD_L4_CSUM_FRAG	BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT	BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP	BIT(24)
#define MVPP2_TXD_L3_IP6	BIT(26)
#define MVPP2_TXD_L_DESC	BIT(28)
#define MVPP2_TXD_F_DESC	BIT(29)

#define MVPP2_RXD_ERR_SUMMARY	BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK	(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC	0x0
#define MVPP2_RXD_ERR_OVERRUN	BIT(13)
#define MVPP2_RXD_ERR_RESOURCE	(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC	BIT(21)
#define MVPP2_RXD_L4_CSUM_OK	BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP	BIT(25)
#define MVPP2_RXD_L4_UDP	BIT(26)
#define MVPP2_RXD_L3_IP4	BIT(28)
#define MVPP2_RXD_L3_IP6	BIT(30)
#define MVPP2_RXD_BUF_HDR	BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8 packet_offset;	/* the offset from the buffer beginning	*/
	u8 phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8 reserved4;		/* bm_qset (for future use, BM)		*/
	u8 reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8 packet_offset;
	u8 phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses only a few buffer descriptors,
 * so 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

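/*
 * Note on the PPv2.2 branch above: the buffer DMA address shares a
 * 64-bit descriptor field with PTP-related data (hence the field name
 * buf_dma_addr_ptp), so only bits [40:0] are rewritten through
 * GENMASK_ULL(40, 0) and the upper bits are preserved. The RX-side
 * getters below apply the same mask when reading.
 */
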
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

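/*
 * Example of the numbering above: physical egress ports start after
 * the MVPP2_MAX_TCONT = 16 PON T-CONTs, so port 0 is physical egress
 * port 16 and its first physical TXQ is
 * mvpp2_txq_phys(0, 0) = (16 + 0) * 8 + 0 = 128.
 */
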
/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

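/*
 * Usage sketch (a hypothetical caller, to illustrate the indirect
 * access pattern): fill a struct mvpp2_prs_entry on the stack, pick
 * the entry index, then let mvpp2_prs_hw_write() program the index
 * register and burst out the TCAM/SRAM data words, e.g.:
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	memset(&pe, 0, sizeof(pe));
 *	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
 *	pe.index = MVPP2_PE_DROP_ALL;
 *	mvpp2_prs_hw_write(priv, &pe);
 */
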
/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

1499/* Obtain port map from tcam sw entry */
1500static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1501{
1502 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1503
1504 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1505}
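/*
 * Worked example (illustrative only): the enable byte holds the port map
 * inverted - a cleared bit means "match this port". For ports = 0x3
 * (ports 0 and 1), mvpp2_prs_tcam_port_map_set() stores ~0x3 & 0xff = 0xfc,
 * and mvpp2_prs_tcam_port_map_get() recovers ~0xfc & 0xff = 0x3.
 */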
1506
1507/* Set byte of data and its enable bits in tcam sw entry */
1508static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1509 unsigned int offs, unsigned char byte,
1510 unsigned char enable)
1511{
1512 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1513 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1514}
1515
1516/* Get byte of data and its enable bits from tcam sw entry */
1517static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1518 unsigned int offs, unsigned char *byte,
1519 unsigned char *enable)
1520{
1521 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1522 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1523}
1524
1525/* Set ethertype in tcam sw entry */
1526static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1527 unsigned short ethertype)
1528{
1529 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1530 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1531}
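/*
 * Example (illustrative only): for ethertype 0x0800 (IPv4) this writes
 * 0x08 at 'offset' and 0x00 at 'offset + 1', each with a full 0xff
 * enable mask, i.e. an exact big-endian 16-bit match in the TCAM data.
 */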
1532
1533/* Set bits in sram sw entry */
1534static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1535 int val)
1536{
1537 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1538}
1539
1540/* Clear bits in sram sw entry */
1541static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1542 int val)
1543{
1544 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1545}
1546
1547/* Update ri bits in sram sw entry */
1548static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1549 unsigned int bits, unsigned int mask)
1550{
1551 unsigned int i;
1552
1553 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1554 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1555
1556 if (!(mask & BIT(i)))
1557 continue;
1558
1559 if (bits & BIT(i))
1560 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1561 else
1562 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1563
1564 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1565 }
1566}
1567
1568/* Update ai bits in sram sw entry */
1569static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1570 unsigned int bits, unsigned int mask)
1571{
1572 unsigned int i;
1573 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1574
1575 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1576
1577 if (!(mask & BIT(i)))
1578 continue;
1579
1580 if (bits & BIT(i))
1581 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1582 else
1583 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1584
1585 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1586 }
1587}
1588
1589/* Read ai bits from sram sw entry */
1590static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1591{
1592 u8 bits;
1593 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1594 int ai_en_off = ai_off + 1;
1595 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1596
1597 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1598 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1599
1600 return bits;
1601}
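/*
 * Example (illustrative only): the AI byte need not be byte aligned in
 * SRAM. If MVPP2_PRS_SRAM_AI_OFFS % 8 == 2, the low six bits come from
 * sram.byte[ai_off] >> 2 and the top two from sram.byte[ai_en_off] << 6,
 * which is exactly what the expression above stitches back together.
 */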
1602
1603/* In the sram sw entry set the lookup ID field of the tcam key to be used in
1604 * the next lookup iteration
1605 */
1606static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1607 unsigned int lu)
1608{
1609 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1610
1611 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1612 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1613 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1614}
1615
1616/* In the sram sw entry set sign and value of the next lookup offset
1617 * and the offset value generated to the classifier
1618 */
1619static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1620 unsigned int op)
1621{
1622 /* Set sign */
1623 if (shift < 0) {
1624 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1625 shift = 0 - shift;
1626 } else {
1627 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1628 }
1629
1630 /* Set value */
1631 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1632 (unsigned char)shift;
1633
1634 /* Reset and set operation */
1635 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1636 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1637 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1638
1639 /* Set base offset as current */
1640 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1641}
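/*
 * Example (illustrative only): the shift is stored as sign-magnitude.
 * A shift of -4 sets MVPP2_PRS_SRAM_SHIFT_SIGN_BIT and stores 4 in the
 * magnitude byte; a shift of +14 (e.g. to skip an Ethernet header)
 * leaves the sign bit clear and stores 14 directly.
 */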
1642
1643/* In the sram sw entry set sign and value of the user defined offset
1644 * generated to the classifier
1645 */
1646static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1647 unsigned int type, int offset,
1648 unsigned int op)
1649{
1650 /* Set sign */
1651 if (offset < 0) {
1652 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1653 offset = 0 - offset;
1654 } else {
1655 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1656 }
1657
1658 /* Set value */
1659 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1660 MVPP2_PRS_SRAM_UDF_MASK);
1661 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1662 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1663 MVPP2_PRS_SRAM_UDF_BITS)] &=
1664 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1665 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1666 MVPP2_PRS_SRAM_UDF_BITS)] |=
1667 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1668
1669 /* Set offset type */
1670 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1671 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1672 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1673
1674 /* Set offset operation */
1675 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1676 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1677 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1678
1679 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1680 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1681 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1682 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1683
1684 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1685 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1686 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1687
1688 /* Set base offset as current */
1689 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1690}
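/*
 * Note (illustrative): like the shift above, the UDF offset is stored as
 * sign-magnitude, but MVPP2_PRS_SRAM_UDF_OFFS is not byte aligned, so the
 * value straddles two SRAM bytes: the low bits go through
 * mvpp2_prs_sram_bits_set() and the spill-over bits are masked and OR-ed
 * into the following byte by hand - the same pattern is repeated for the
 * offset operation field.
 */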
1691
1692/* Find parser flow entry */
1693static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1694{
1695 struct mvpp2_prs_entry *pe;
1696 int tid;
1697
1698 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1699 if (!pe)
1700 return NULL;
1701 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1702
1703	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1704 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1705 u8 bits;
1706
1707 if (!priv->prs_shadow[tid].valid ||
1708 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1709 continue;
1710
1711 pe->index = tid;
1712 mvpp2_prs_hw_read(priv, pe);
1713 bits = mvpp2_prs_sram_ai_get(pe);
1714
1715		/* SRAM stores the classification lookup ID in AI bits [5:0] */
1716 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1717 return pe;
1718 }
1719 kfree(pe);
1720
1721 return NULL;
1722}
1723
1724/* Return first free tcam index, seeking from start to end */
1725static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1726 unsigned char end)
1727{
1728 int tid;
1729
1730 if (start > end)
1731 swap(start, end);
1732
1733 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1734 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1735
1736 for (tid = start; tid <= end; tid++) {
1737 if (!priv->prs_shadow[tid].valid)
1738 return tid;
1739 }
1740
1741 return -EINVAL;
1742}
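/*
 * Usage sketch (illustrative only): the scan is always ascending - start
 * and end are swapped when given in reverse - so
 *	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 *					MVPP2_PE_LAST_FREE_TID);
 * returns the lowest invalid index in the shared free range, or -EINVAL
 * when every entry in the range is already in use.
 */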
1743
1744/* Enable/disable dropping all mac da's */
1745static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1746{
1747 struct mvpp2_prs_entry pe;
1748
1749 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1750		/* Entry exists - update port only */
1751 pe.index = MVPP2_PE_DROP_ALL;
1752 mvpp2_prs_hw_read(priv, &pe);
1753 } else {
1754 /* Entry doesn't exist - create new */
1755 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1756 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1757 pe.index = MVPP2_PE_DROP_ALL;
1758
1759 /* Non-promiscuous mode for all ports - DROP unknown packets */
1760 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1761 MVPP2_PRS_RI_DROP_MASK);
1762
1763 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1764 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1765
1766 /* Update shadow table */
1767 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1768
1769 /* Mask all ports */
1770 mvpp2_prs_tcam_port_map_set(&pe, 0);
1771 }
1772
1773 /* Update port mask */
1774 mvpp2_prs_tcam_port_set(&pe, port, add);
1775
1776 mvpp2_prs_hw_write(priv, &pe);
1777}
1778
1779/* Set port to promiscuous mode */
1780static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1781{
1782 struct mvpp2_prs_entry pe;
1783
1784 /* Promiscuous mode - Accept unknown packets */
1785
1786 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1787 /* Entry exist - update port only */
1788 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1789 mvpp2_prs_hw_read(priv, &pe);
1790 } else {
1791 /* Entry doesn't exist - create new */
1792 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1793 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1794 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1795
1796 /* Continue - set next lookup */
1797 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1798
1799 /* Set result info bits */
1800 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1801 MVPP2_PRS_RI_L2_CAST_MASK);
1802
1803 /* Shift to ethertype */
1804 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1805 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1806
1807 /* Mask all ports */
1808 mvpp2_prs_tcam_port_map_set(&pe, 0);
1809
1810 /* Update shadow table */
1811 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1812 }
1813
1814 /* Update port mask */
1815 mvpp2_prs_tcam_port_set(&pe, port, add);
1816
1817 mvpp2_prs_hw_write(priv, &pe);
1818}
1819
1820/* Accept multicast */
1821static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1822 bool add)
1823{
1824 struct mvpp2_prs_entry pe;
1825 unsigned char da_mc;
1826
1827 /* Ethernet multicast address first byte is
1828 * 0x01 for IPv4 and 0x33 for IPv6
1829 */
1830 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1831
1832 if (priv->prs_shadow[index].valid) {
1833		/* Entry exists - update port only */
1834 pe.index = index;
1835 mvpp2_prs_hw_read(priv, &pe);
1836 } else {
1837 /* Entry doesn't exist - create new */
1838 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1839 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1840 pe.index = index;
1841
1842 /* Continue - set next lookup */
1843 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1844
1845 /* Set result info bits */
1846 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1847 MVPP2_PRS_RI_L2_CAST_MASK);
1848
1849 /* Update tcam entry data first byte */
1850 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1851
1852 /* Shift to ethertype */
1853 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1854 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1855
1856 /* Mask all ports */
1857 mvpp2_prs_tcam_port_map_set(&pe, 0);
1858
1859 /* Update shadow table */
1860 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1861 }
1862
1863 /* Update port mask */
1864 mvpp2_prs_tcam_port_set(&pe, port, add);
1865
1866 mvpp2_prs_hw_write(priv, &pe);
1867}
1868
1869/* Parser per-port initialization */
1870static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1871 int lu_max, int offset)
1872{
1873 u32 val;
1874
1875 /* Set lookup ID */
1876 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1877 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1878 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1879 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1880
1881 /* Set maximum number of loops for packet received from port */
1882 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1883 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1884 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1885 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1886
1887 /* Set initial offset for packet header extraction for the first
1888 * searching loop
1889 */
1890 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1891 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1892 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1893 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1894}
1895
1896/* Default flow entries initialization for all ports */
1897static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1898{
1899 struct mvpp2_prs_entry pe;
1900 int port;
1901
1902 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1903 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1904 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1905 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1906
1907 /* Mask all ports */
1908 mvpp2_prs_tcam_port_map_set(&pe, 0);
1909
1910		/* Set flow ID */
1911 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1912 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1913
1914 /* Update shadow table and hw entry */
1915 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1916 mvpp2_prs_hw_write(priv, &pe);
1917 }
1918}
1919
1920/* Set default entry for Marvell Header field */
1921static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1922{
1923 struct mvpp2_prs_entry pe;
1924
1925 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1926
1927 pe.index = MVPP2_PE_MH_DEFAULT;
1928 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1929 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1930 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1931 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1932
1933 /* Unmask all ports */
1934 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1935
1936 /* Update shadow table and hw entry */
1937 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1938 mvpp2_prs_hw_write(priv, &pe);
1939}
1940
1941/* Set default entries (placeholders) for promiscuous, non-promiscuous and
1942 * multicast MAC addresses
1943 */
1944static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1945{
1946 struct mvpp2_prs_entry pe;
1947
1948 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1949
1950 /* Non-promiscuous mode for all ports - DROP unknown packets */
1951 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1952 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1953
1954 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1955 MVPP2_PRS_RI_DROP_MASK);
1956 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1957 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1958
1959 /* Unmask all ports */
1960 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1961
1962 /* Update shadow table and hw entry */
1963 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1964 mvpp2_prs_hw_write(priv, &pe);
1965
1966	/* placeholders only - no ports */
1967 mvpp2_prs_mac_drop_all_set(priv, 0, false);
1968 mvpp2_prs_mac_promisc_set(priv, 0, false);
1969 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1970 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1971}
1972
1973/* Match basic ethertypes */
1974static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1975{
1976 struct mvpp2_prs_entry pe;
1977 int tid;
1978
1979 /* Ethertype: PPPoE */
1980 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1981 MVPP2_PE_LAST_FREE_TID);
1982 if (tid < 0)
1983 return tid;
1984
1985 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1986 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
1987 pe.index = tid;
1988
1989 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
1990
1991 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
1992 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1993 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1994 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
1995 MVPP2_PRS_RI_PPPOE_MASK);
1996
1997 /* Update shadow table and hw entry */
1998 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
1999 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2000 priv->prs_shadow[pe.index].finish = false;
2001 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2002 MVPP2_PRS_RI_PPPOE_MASK);
2003 mvpp2_prs_hw_write(priv, &pe);
2004
2005 /* Ethertype: ARP */
2006 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2007 MVPP2_PE_LAST_FREE_TID);
2008 if (tid < 0)
2009 return tid;
2010
2011 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2012 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2013 pe.index = tid;
2014
2015 mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
2016
2017	/* Generate flow in the next iteration */
2018 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2019 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2020 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2021 MVPP2_PRS_RI_L3_PROTO_MASK);
2022 /* Set L3 offset */
2023 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2024 MVPP2_ETH_TYPE_LEN,
2025 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2026
2027 /* Update shadow table and hw entry */
2028 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2029 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2030 priv->prs_shadow[pe.index].finish = true;
2031 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2032 MVPP2_PRS_RI_L3_PROTO_MASK);
2033 mvpp2_prs_hw_write(priv, &pe);
2034
2035 /* Ethertype: LBTD */
2036 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2037 MVPP2_PE_LAST_FREE_TID);
2038 if (tid < 0)
2039 return tid;
2040
2041 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2042 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2043 pe.index = tid;
2044
2045 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2046
2047	/* Generate flow in the next iteration */
2048 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2049 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2050 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2051 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2052 MVPP2_PRS_RI_CPU_CODE_MASK |
2053 MVPP2_PRS_RI_UDF3_MASK);
2054 /* Set L3 offset */
2055 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2056 MVPP2_ETH_TYPE_LEN,
2057 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2058
2059 /* Update shadow table and hw entry */
2060 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2061 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2062 priv->prs_shadow[pe.index].finish = true;
2063 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2064 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2065 MVPP2_PRS_RI_CPU_CODE_MASK |
2066 MVPP2_PRS_RI_UDF3_MASK);
2067 mvpp2_prs_hw_write(priv, &pe);
2068
2069 /* Ethertype: IPv4 without options */
2070 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2071 MVPP2_PE_LAST_FREE_TID);
2072 if (tid < 0)
2073 return tid;
2074
2075 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2076 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2077 pe.index = tid;
2078
2079 mvpp2_prs_match_etype(&pe, 0, PROT_IP);
2080 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2081 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2082 MVPP2_PRS_IPV4_HEAD_MASK |
2083 MVPP2_PRS_IPV4_IHL_MASK);
2084
2085 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2086 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2087 MVPP2_PRS_RI_L3_PROTO_MASK);
2088 /* Skip eth_type + 4 bytes of IP header */
2089 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2090 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2091 /* Set L3 offset */
2092 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2093 MVPP2_ETH_TYPE_LEN,
2094 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2095
2096 /* Update shadow table and hw entry */
2097 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2098 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2099 priv->prs_shadow[pe.index].finish = false;
2100 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2101 MVPP2_PRS_RI_L3_PROTO_MASK);
2102 mvpp2_prs_hw_write(priv, &pe);
2103
2104 /* Ethertype: IPv4 with options */
2105 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2106 MVPP2_PE_LAST_FREE_TID);
2107 if (tid < 0)
2108 return tid;
2109
2110 pe.index = tid;
2111
2112 /* Clear tcam data before updating */
2113 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2114 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2115
2116 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2117 MVPP2_PRS_IPV4_HEAD,
2118 MVPP2_PRS_IPV4_HEAD_MASK);
2119
2120 /* Clear ri before updating */
2121 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2122 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2123 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2124 MVPP2_PRS_RI_L3_PROTO_MASK);
2125
2126 /* Update shadow table and hw entry */
2127 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2128 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2129 priv->prs_shadow[pe.index].finish = false;
2130 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2131 MVPP2_PRS_RI_L3_PROTO_MASK);
2132 mvpp2_prs_hw_write(priv, &pe);
2133
2134 /* Ethertype: IPv6 without options */
2135 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2136 MVPP2_PE_LAST_FREE_TID);
2137 if (tid < 0)
2138 return tid;
2139
2140 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2141 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2142 pe.index = tid;
2143
2144 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
2145
2146 /* Skip DIP of IPV6 header */
2147 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2148 MVPP2_MAX_L3_ADDR_SIZE,
2149 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2150 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2151 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2152 MVPP2_PRS_RI_L3_PROTO_MASK);
2153 /* Set L3 offset */
2154 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2155 MVPP2_ETH_TYPE_LEN,
2156 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2157
2158 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2159 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2160 priv->prs_shadow[pe.index].finish = false;
2161 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2162 MVPP2_PRS_RI_L3_PROTO_MASK);
2163 mvpp2_prs_hw_write(priv, &pe);
2164
2165 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2166 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2167 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2168 pe.index = MVPP2_PE_ETH_TYPE_UN;
2169
2170 /* Unmask all ports */
2171 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2172
2173	/* Generate flow in the next iteration */
2174 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2175 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2176 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2177 MVPP2_PRS_RI_L3_PROTO_MASK);
2178	/* Set L3 offset even though the L3 protocol is unknown */
2179 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2180 MVPP2_ETH_TYPE_LEN,
2181 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2182
2183 /* Update shadow table and hw entry */
2184 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2185 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2186 priv->prs_shadow[pe.index].finish = true;
2187 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2188 MVPP2_PRS_RI_L3_PROTO_MASK);
2189 mvpp2_prs_hw_write(priv, &pe);
2190
2191 return 0;
2192}
2193
2194/* Parser default initialization */
2195static int mvpp2_prs_default_init(struct udevice *dev,
2196 struct mvpp2 *priv)
2197{
2198 int err, index, i;
2199
2200 /* Enable tcam table */
2201 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2202
2203 /* Clear all tcam and sram entries */
2204 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2205 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2206 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2207 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2208
2209 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2210 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2211 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2212 }
2213
2214 /* Invalidate all tcam entries */
2215 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2216 mvpp2_prs_hw_inv(priv, index);
2217
2218 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2219 sizeof(struct mvpp2_prs_shadow),
2220 GFP_KERNEL);
2221 if (!priv->prs_shadow)
2222 return -ENOMEM;
2223
2224 /* Always start from lookup = 0 */
2225 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2226 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2227 MVPP2_PRS_PORT_LU_MAX, 0);
2228
2229 mvpp2_prs_def_flow_init(priv);
2230
2231 mvpp2_prs_mh_init(priv);
2232
2233 mvpp2_prs_mac_init(priv);
2234
2235 err = mvpp2_prs_etype_init(priv);
2236 if (err)
2237 return err;
2238
2239 return 0;
2240}
2241
2242/* Compare MAC DA with tcam entry data */
2243static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2244 const u8 *da, unsigned char *mask)
2245{
2246 unsigned char tcam_byte, tcam_mask;
2247 int index;
2248
2249 for (index = 0; index < ETH_ALEN; index++) {
2250 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2251 if (tcam_mask != mask[index])
2252 return false;
2253
2254 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2255 return false;
2256 }
2257
2258 return true;
2259}
2260
2261/* Find tcam entry with matched pair <MAC DA, port> */
2262static struct mvpp2_prs_entry *
2263mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2264 unsigned char *mask, int udf_type)
2265{
2266 struct mvpp2_prs_entry *pe;
2267 int tid;
2268
2269 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2270 if (!pe)
2271 return NULL;
2272 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2273
2274	/* Go through all the entries with MVPP2_PRS_LU_MAC */
2275 for (tid = MVPP2_PE_FIRST_FREE_TID;
2276 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2277 unsigned int entry_pmap;
2278
2279 if (!priv->prs_shadow[tid].valid ||
2280 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2281 (priv->prs_shadow[tid].udf != udf_type))
2282 continue;
2283
2284 pe->index = tid;
2285 mvpp2_prs_hw_read(priv, pe);
2286 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2287
2288 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2289 entry_pmap == pmap)
2290 return pe;
2291 }
2292 kfree(pe);
2293
2294 return NULL;
2295}
2296
2297/* Update parser's mac da entry */
2298static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2299 const u8 *da, bool add)
2300{
2301 struct mvpp2_prs_entry *pe;
2302 unsigned int pmap, len, ri;
2303 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2304 int tid;
2305
2306	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
2307 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2308 MVPP2_PRS_UDF_MAC_DEF);
2309
2310 /* No such entry */
2311 if (!pe) {
2312 if (!add)
2313 return 0;
2314
2315 /* Create new TCAM entry */
2316		/* Find the first range MAC entry */
2317 for (tid = MVPP2_PE_FIRST_FREE_TID;
2318 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2319 if (priv->prs_shadow[tid].valid &&
2320 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2321 (priv->prs_shadow[tid].udf ==
2322 MVPP2_PRS_UDF_MAC_RANGE))
2323 break;
2324
2325		/* Go through all the entries from first to last */
2326 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2327 tid - 1);
2328 if (tid < 0)
2329 return tid;
2330
2331 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2332 if (!pe)
2333			return -ENOMEM;
2334 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2335 pe->index = tid;
2336
2337 /* Mask all ports */
2338 mvpp2_prs_tcam_port_map_set(pe, 0);
2339 }
2340
2341 /* Update port mask */
2342 mvpp2_prs_tcam_port_set(pe, port, add);
2343
2344 /* Invalidate the entry if no ports are left enabled */
2345 pmap = mvpp2_prs_tcam_port_map_get(pe);
2346 if (pmap == 0) {
2347 if (add) {
2348 kfree(pe);
2349 return -1;
2350 }
2351 mvpp2_prs_hw_inv(priv, pe->index);
2352 priv->prs_shadow[pe->index].valid = false;
2353 kfree(pe);
2354 return 0;
2355 }
2356
2357 /* Continue - set next lookup */
2358 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2359
2360 /* Set match on DA */
2361 len = ETH_ALEN;
2362 while (len--)
2363 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2364
2365 /* Set result info bits */
2366 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2367
2368 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2369 MVPP2_PRS_RI_MAC_ME_MASK);
2370 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2371 MVPP2_PRS_RI_MAC_ME_MASK);
2372
2373 /* Shift to ethertype */
2374 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2375 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2376
2377 /* Update shadow table and hw entry */
2378 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2379 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2380 mvpp2_prs_hw_write(priv, pe);
2381
2382 kfree(pe);
2383
2384 return 0;
2385}
2386
2387static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2388{
2389 int err;
2390
2391 /* Remove old parser entry */
2392 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2393 false);
2394 if (err)
2395 return err;
2396
2397 /* Add new parser entry */
2398 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2399 if (err)
2400 return err;
2401
2402 /* Set addr in the device */
2403 memcpy(port->dev_addr, da, ETH_ALEN);
2404
2405 return 0;
2406}
2407
2408/* Set prs flow for the port */
2409static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2410{
2411 struct mvpp2_prs_entry *pe;
2412 int tid;
2413
2414 pe = mvpp2_prs_flow_find(port->priv, port->id);
2415
2416	/* Such an entry does not exist */
2417 if (!pe) {
2418		/* Go through all the entries from last to first */
2419 tid = mvpp2_prs_tcam_first_free(port->priv,
2420 MVPP2_PE_LAST_FREE_TID,
2421 MVPP2_PE_FIRST_FREE_TID);
2422 if (tid < 0)
2423 return tid;
2424
2425 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2426 if (!pe)
2427 return -ENOMEM;
2428
2429 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2430 pe->index = tid;
2431
2432		/* Set flow ID */
2433 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2434 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2435
2436 /* Update shadow table */
2437 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2438 }
2439
2440 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2441 mvpp2_prs_hw_write(port->priv, pe);
2442 kfree(pe);
2443
2444 return 0;
2445}
2446
2447/* Classifier configuration routines */
2448
2449/* Update classification flow table registers */
2450static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2451 struct mvpp2_cls_flow_entry *fe)
2452{
2453 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2454 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
2455 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
2456 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
2457}
2458
2459/* Update classification lookup table register */
2460static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2461 struct mvpp2_cls_lookup_entry *le)
2462{
2463 u32 val;
2464
2465 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2466 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2467 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2468}
2469
2470/* Classifier default initialization */
2471static void mvpp2_cls_init(struct mvpp2 *priv)
2472{
2473 struct mvpp2_cls_lookup_entry le;
2474 struct mvpp2_cls_flow_entry fe;
2475 int index;
2476
2477 /* Enable classifier */
2478 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2479
2480 /* Clear classifier flow table */
2481	memset(&fe.data, 0, sizeof(fe.data));	/* clear all data words, not just a word count of bytes */
2482 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2483 fe.index = index;
2484 mvpp2_cls_flow_write(priv, &fe);
2485 }
2486
2487 /* Clear classifier lookup table */
2488 le.data = 0;
2489 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2490 le.lkpid = index;
2491 le.way = 0;
2492 mvpp2_cls_lookup_write(priv, &le);
2493
2494 le.way = 1;
2495 mvpp2_cls_lookup_write(priv, &le);
2496 }
2497}
2498
2499static void mvpp2_cls_port_config(struct mvpp2_port *port)
2500{
2501 struct mvpp2_cls_lookup_entry le;
2502 u32 val;
2503
2504 /* Set way for the port */
2505 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2506 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2507 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2508
2509 /* Pick the entry to be accessed in lookup ID decoding table
2510 * according to the way and lkpid.
2511 */
2512 le.lkpid = port->id;
2513 le.way = 0;
2514 le.data = 0;
2515
2516 /* Set initial CPU queue for receiving packets */
2517 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2518 le.data |= port->first_rxq;
2519
2520 /* Disable classification engines */
2521 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2522
2523 /* Update lookup ID table entry */
2524 mvpp2_cls_lookup_write(port->priv, &le);
2525}
2526
2527/* Set CPU queue number for oversize packets */
2528static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2529{
2530 u32 val;
2531
2532 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2533 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2534
2535 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2536 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2537
2538 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2539 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2540 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2541}
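/*
 * Example (illustrative, assuming MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS = 3 as
 * defined earlier in this file): for first_rxq = 12 the OVERSIZE_RXQ_LOW
 * register receives 12 & 0x7 = 4 and the SWFWD_P2HQ register receives
 * 12 >> 3 = 1, splitting the queue number across the two fields.
 */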
2542
2543/* Buffer Manager configuration routines */
2544
2545/* Create pool */
2546static int mvpp2_bm_pool_create(struct udevice *dev,
2547 struct mvpp2 *priv,
2548 struct mvpp2_bm_pool *bm_pool, int size)
2549{
2550 u32 val;
2551
2552	/* Number of buffer pointers must be a multiple of 16, as per
2553	 * hardware constraints
2554	 */
2555	if (!IS_ALIGNED(size, 16))
2556		return -EINVAL;
2557
2558	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2559	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2560	if (!bm_pool->virt_addr)
2561		return -ENOMEM;
2562
2563	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2564			MVPP2_BM_POOL_PTR_ALIGN)) {
2565		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2566			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2567		return -ENOMEM;
2568	}
2569
2570	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2571		    lower_32_bits(bm_pool->dma_addr));
2572	if (priv->hw_version == MVPP22)
2573		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
2574			    (upper_32_bits(bm_pool->dma_addr) &
2575			     MVPP22_BM_POOL_BASE_HIGH_MASK));
2576	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2577
2578 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2579 val |= MVPP2_BM_START_MASK;
2580 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2581
2582 bm_pool->type = MVPP2_BM_FREE;
2583 bm_pool->size = size;
2584 bm_pool->pkt_size = 0;
2585 bm_pool->buf_num = 0;
2586
2587 return 0;
2588}
2589
2590/* Set pool buffer size */
2591static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2592 struct mvpp2_bm_pool *bm_pool,
2593 int buf_size)
2594{
2595 u32 val;
2596
2597 bm_pool->buf_size = buf_size;
2598
2599 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2600 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2601}
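/*
 * Example (illustrative only): with MVPP2_POOL_BUF_SIZE_OFFSET = 5 the
 * hardware granularity is 1 << 5 = 32 bytes, so a buf_size of 1584 is
 * rounded up to 1600 before being written to the pool's buffer size
 * register.
 */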
2602
2603/* Free all buffers from the pool */
2604static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2605 struct mvpp2_bm_pool *bm_pool)
2606{
2607	int i;
2608
2609 for (i = 0; i < bm_pool->buf_num; i++) {
2610 /* Allocate buffer back from the buffer manager */
2611 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
2612 }
2613
2614	bm_pool->buf_num = 0;
2615}
2616
2617/* Cleanup pool */
2618static int mvpp2_bm_pool_destroy(struct udevice *dev,
2619 struct mvpp2 *priv,
2620 struct mvpp2_bm_pool *bm_pool)
2621{
2622 u32 val;
2623
2624 mvpp2_bm_bufs_free(dev, priv, bm_pool);
2625 if (bm_pool->buf_num) {
2626 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2627 return 0;
2628 }
2629
2630 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2631 val |= MVPP2_BM_STOP_MASK;
2632 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2633
2634 return 0;
2635}
2636
2637static int mvpp2_bm_pools_init(struct udevice *dev,
2638 struct mvpp2 *priv)
2639{
2640 int i, err, size;
2641 struct mvpp2_bm_pool *bm_pool;
2642
2643 /* Create all pools with maximum size */
2644 size = MVPP2_BM_POOL_SIZE_MAX;
2645 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2646 bm_pool = &priv->bm_pools[i];
2647 bm_pool->id = i;
2648 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2649 if (err)
2650 goto err_unroll_pools;
2651		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
2652	}
2653 return 0;
2654
2655err_unroll_pools:
2656	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2657 for (i = i - 1; i >= 0; i--)
2658 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2659 return err;
2660}
2661
2662static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2663{
2664 int i, err;
2665
2666 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2667 /* Mask BM all interrupts */
2668 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2669 /* Clear BM cause register */
2670 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2671 }
2672
2673 /* Allocate and initialize BM pools */
2674 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2675 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2676 if (!priv->bm_pools)
2677 return -ENOMEM;
2678
2679 err = mvpp2_bm_pools_init(dev, priv);
2680 if (err < 0)
2681 return err;
2682 return 0;
2683}
2684
2685/* Attach long pool to rxq */
2686static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2687 int lrxq, int long_pool)
2688{
2689	u32 val, mask;
2690	int prxq;
2691
2692	/* Get queue physical ID */
2693	prxq = port->rxqs[lrxq]->id;
2694
2695	if (port->priv->hw_version == MVPP21)
2696		mask = MVPP21_RXQ_POOL_LONG_MASK;
2697	else
2698		mask = MVPP22_RXQ_POOL_LONG_MASK;
2699
2700	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2701	val &= ~mask;
2702	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
2703	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2704}
2705
2706/* Set pool number in a BM cookie */
2707static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2708{
2709 u32 bm;
2710
2711 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2712 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2713
2714 return bm;
2715}
2716
2717/* Get pool number from a BM cookie */
2718static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2719{
2720 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2721}
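/*
 * Example (illustrative, assuming MVPP2_BM_COOKIE_POOL_OFFS = 8 as defined
 * earlier in this file): mvpp2_bm_cookie_pool_set(0, 3) yields 0x300 and
 * mvpp2_bm_cookie_pool_get(0x300) recovers pool 3 - the pool id simply
 * occupies bits [15:8] of the BM cookie.
 */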
2722
2723/* Release buffer to BM */
2724static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2725				     dma_addr_t buf_dma_addr,
2726				     unsigned long buf_phys_addr)
2727{
2728	if (port->priv->hw_version == MVPP22) {
2729 u32 val = 0;
2730
2731 if (sizeof(dma_addr_t) == 8)
2732 val |= upper_32_bits(buf_dma_addr) &
2733 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2734
2735 if (sizeof(phys_addr_t) == 8)
2736 val |= (upper_32_bits(buf_phys_addr)
2737 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2738 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2739
2740 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2741 }
2742
2743	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2744 * returned in the "cookie" field of the RX
2745 * descriptor. Instead of storing the virtual address, we
2746 * store the physical address
2747 */
2748 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2749	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
2750}
2751
2752/* Refill BM pool */
2753static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2754			      dma_addr_t dma_addr,
2755			      phys_addr_t phys_addr)
2756{
2757 int pool = mvpp2_bm_cookie_pool_get(bm);
2758
2759	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2760}
2761
2762/* Allocate buffers for the pool */
2763static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2764 struct mvpp2_bm_pool *bm_pool, int buf_num)
2765{
2766 int i;
2767
2768 if (buf_num < 0 ||
2769 (buf_num + bm_pool->buf_num > bm_pool->size)) {
2770 netdev_err(port->dev,
2771 "cannot allocate %d buffers for pool %d\n",
2772 buf_num, bm_pool->id);
2773 return 0;
2774 }
2775
2776	for (i = 0; i < buf_num; i++) {
2777		mvpp2_bm_pool_put(port, bm_pool->id,
2778				  (dma_addr_t)buffer_loc.rx_buffer[i],
2779				  (unsigned long)buffer_loc.rx_buffer[i]);
2780
2781	}
2782
2783 /* Update BM driver with number of buffers added to pool */
2784 bm_pool->buf_num += i;
2785
2786 return i;
2787}
2788
2789/* Notify the driver that BM pool is being used as specific type and return the
2790 * pool pointer on success
2791 */
2792static struct mvpp2_bm_pool *
2793mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2794 int pkt_size)
2795{
2796 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2797 int num;
2798
2799 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2800 netdev_err(port->dev, "mixing pool types is forbidden\n");
2801 return NULL;
2802 }
2803
2804 if (new_pool->type == MVPP2_BM_FREE)
2805 new_pool->type = type;
2806
2807 /* Allocate buffers in case BM pool is used as long pool, but packet
2808	 * size doesn't match MTU or BM pool hasn't been used yet
2809 */
2810 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2811 (new_pool->pkt_size == 0)) {
2812 int pkts_num;
2813
2814 /* Set default buffer number or free all the buffers in case
2815 * the pool is not empty
2816 */
2817 pkts_num = new_pool->buf_num;
2818 if (pkts_num == 0)
2819 pkts_num = type == MVPP2_BM_SWF_LONG ?
2820 MVPP2_BM_LONG_BUF_NUM :
2821 MVPP2_BM_SHORT_BUF_NUM;
2822 else
2823 mvpp2_bm_bufs_free(NULL,
2824 port->priv, new_pool);
2825
2826 new_pool->pkt_size = pkt_size;
2827
2828 /* Allocate buffers for this pool */
2829 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2830 if (num != pkts_num) {
2831			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2832 new_pool->id, num, pkts_num);
2833 return NULL;
2834 }
2835 }
2836
2837	return new_pool;
2838}
2839
2840/* Initialize pools for swf */
2841static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2842{
2843 int rxq;
2844
2845 if (!port->pool_long) {
2846 port->pool_long =
2847 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2848 MVPP2_BM_SWF_LONG,
2849 port->pkt_size);
2850 if (!port->pool_long)
2851 return -ENOMEM;
2852
2853 port->pool_long->port_map |= (1 << port->id);
2854
2855 for (rxq = 0; rxq < rxq_number; rxq++)
2856 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2857 }
2858
2859 return 0;
2860}
2861
2862/* Port configuration routines */
2863
2864static void mvpp2_port_mii_set(struct mvpp2_port *port)
2865{
2866 u32 val;
2867
2868 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2869
2870 switch (port->phy_interface) {
2871 case PHY_INTERFACE_MODE_SGMII:
2872 val |= MVPP2_GMAC_INBAND_AN_MASK;
2873 break;
2874 case PHY_INTERFACE_MODE_RGMII:
2875	case PHY_INTERFACE_MODE_RGMII_ID:
2876		val |= MVPP2_GMAC_PORT_RGMII_MASK;
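		/* fall through: the RGMII modes also need the PCS disabled below */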
2877 default:
2878 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2879 }
2880
2881 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2882}
2883
2884static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2885{
2886 u32 val;
2887
2888 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2889 val |= MVPP2_GMAC_FC_ADV_EN;
2890 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2891}
2892
2893static void mvpp2_port_enable(struct mvpp2_port *port)
2894{
2895 u32 val;
2896
2897 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2898 val |= MVPP2_GMAC_PORT_EN_MASK;
2899 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2900 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2901}
2902
2903static void mvpp2_port_disable(struct mvpp2_port *port)
2904{
2905 u32 val;
2906
2907 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2908 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2909 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2910}
2911
2912/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2913static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2914{
2915 u32 val;
2916
2917 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2918 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2919 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2920}
2921
2922/* Configure loopback port */
2923static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2924{
2925 u32 val;
2926
2927 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2928
2929 if (port->speed == 1000)
2930 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2931 else
2932 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2933
2934 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2935 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2936 else
2937 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2938
2939 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2940}
2941
2942static void mvpp2_port_reset(struct mvpp2_port *port)
2943{
2944 u32 val;
2945
2946 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2947 ~MVPP2_GMAC_PORT_RESET_MASK;
2948 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2949
2950 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2951 MVPP2_GMAC_PORT_RESET_MASK)
2952 continue;
2953}
2954
2955/* Change maximum receive size of the port */
2956static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2957{
2958 u32 val;
2959
2960 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2961 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2962 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2963 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2964 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2965}
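/*
 * Example (illustrative, assuming MVPP2_MH_SIZE = 2): the max RX size
 * field is in units of two bytes and excludes the Marvell header, so for
 * pkt_size = 1518 the value written is (1518 - 2) / 2 = 758.
 */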
2966
2967/* PPv2.2 GoP/GMAC config */
2968
2969/* Set the MAC to reset or exit from reset */
2970static int gop_gmac_reset(struct mvpp2_port *port, int reset)
2971{
2972 u32 val;
2973
2974 /* read - modify - write */
2975 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2976 if (reset)
2977 val |= MVPP2_GMAC_PORT_RESET_MASK;
2978 else
2979 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
2980 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2981
2982 return 0;
2983}
2984
2985/*
2986 * gop_gpcs_mode_cfg
2987 *
2988 * Configure port to working with Gig PCS or don't.
2989 */
2990static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
2991{
2992 u32 val;
2993
2994 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2995 if (en)
2996 val |= MVPP2_GMAC_PCS_ENABLE_MASK;
2997 else
2998 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2999 /* enable / disable PCS on this port */
3000 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3001
3002 return 0;
3003}
3004
3005static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
3006{
3007 u32 val;
3008
3009 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3010 if (en)
3011 val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3012 else
3013 val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3014	/* enable / disable the 125MHz clock bypass on this port */
3015 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3016
3017 return 0;
3018}
3019
3020static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
3021{
3022 u32 val, thresh;
3023
3024 /*
3025 * Configure minimal level of the Tx FIFO before the lower part
3026 * starts to read a packet
3027 */
3028 thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
3029 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3030 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3031 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3032 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3033
3034 /* Disable bypass of sync module */
3035 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3036 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3037 /* configure DP clock select according to mode */
3038 val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3039 /* configure QSGMII bypass according to mode */
3040 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3041 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3042
3043	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3044 /*
3045 * Configure GIG MAC to 1000Base-X mode connected to a fiber
3046 * transceiver
3047 */
3048 val |= MVPP2_GMAC_PORT_TYPE_MASK;
3049 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3050
3051 /* configure AN 0x9268 */
3052 val = MVPP2_GMAC_EN_PCS_AN |
3053 MVPP2_GMAC_AN_BYPASS_EN |
3054 MVPP2_GMAC_CONFIG_MII_SPEED |
3055 MVPP2_GMAC_CONFIG_GMII_SPEED |
3056 MVPP2_GMAC_FC_ADV_EN |
3057 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3058 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3059 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3060}
3061
3062static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
3063{
3064 u32 val, thresh;
3065
3066 /*
3067 * Configure minimal level of the Tx FIFO before the lower part
3068 * starts to read a packet
3069 */
3070 thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3071 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3072 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3073 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3074 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3075
3076 /* Disable bypass of sync module */
3077 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3078 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3079 /* configure DP clock select according to mode */
3080 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3081 /* configure QSGMII bypass according to mode */
3082 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3083 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3084
3085	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3086 /* configure GIG MAC to SGMII mode */
3087 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3088 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3089
3090 /* configure AN */
3091 val = MVPP2_GMAC_EN_PCS_AN |
3092 MVPP2_GMAC_AN_BYPASS_EN |
3093 MVPP2_GMAC_AN_SPEED_EN |
3094 MVPP2_GMAC_EN_FC_AN |
3095 MVPP2_GMAC_AN_DUPLEX_EN |
3096 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3097 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3098}
3099
3100static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
3101{
3102 u32 val, thresh;
3103
3104 /*
3105 * Configure minimal level of the Tx FIFO before the lower part
3106 * starts to read a packet
3107 */
3108 thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
3109 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3110 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3111 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3112 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3113
3114 /* Disable bypass of sync module */
3115 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3116 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3117 /* configure DP clock select according to mode */
3118 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3119 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3120 val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
3121 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3122
3123	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3124 /* configure GIG MAC to SGMII mode */
3125 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3126 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3127
3128 /* configure AN 0xb8e8 */
3129 val = MVPP2_GMAC_AN_BYPASS_EN |
3130 MVPP2_GMAC_AN_SPEED_EN |
3131 MVPP2_GMAC_EN_FC_AN |
3132 MVPP2_GMAC_AN_DUPLEX_EN |
3133 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3134 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3135}
3136
3137/* Set the internal muxes to the required MAC in the GOP */
3138static int gop_gmac_mode_cfg(struct mvpp2_port *port)
3139{
3140 u32 val;
3141
3142 /* Set TX FIFO thresholds */
3143 switch (port->phy_interface) {
3144 case PHY_INTERFACE_MODE_SGMII:
3145 if (port->phy_speed == 2500)
3146 gop_gmac_sgmii2_5_cfg(port);
3147 else
3148 gop_gmac_sgmii_cfg(port);
3149 break;
3150
3151 case PHY_INTERFACE_MODE_RGMII:
3152 case PHY_INTERFACE_MODE_RGMII_ID:
3153 gop_gmac_rgmii_cfg(port);
3154 break;
3155
3156 default:
3157 return -1;
3158 }
3159
3160 /* Jumbo frame support - 0x1400*2= 0x2800 bytes */
3161 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3162 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3163 val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
3164 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3165
3166 /* PeriodicXonEn disable */
3167 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3168 val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3169 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3170
3171 return 0;
3172}
3173
3174static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
3175{
3176 u32 val;
3177
3178 /* relevant only for MAC0 (XLG0 and GMAC0) */
3179 if (port->gop_id > 0)
3180 return;
3181
3182 /* configure 1Gig MAC mode */
3183 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
3184 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3185 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3186 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
3187}
3188
3189static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
3190{
3191 u32 val;
3192
3193 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3194 if (reset)
3195 val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
3196 else
3197 val |= MVPP2_GMAC_SGMII_MODE_MASK;
3198 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3199
3200 return 0;
3201}
3202
3203/* Set the internal muxes to the required PCS in the PI */
3204static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
3205{
3206 u32 val;
3207 int lane;
3208
3209 switch (num_of_lanes) {
3210 case 1:
3211 lane = 0;
3212 break;
3213 case 2:
3214 lane = 1;
3215 break;
3216 case 4:
3217 lane = 2;
3218 break;
3219 default:
3220 return -1;
3221 }
3222
3223 /* configure XG MAC mode */
3224 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
3225	val &= ~MVPP22_XPCS_PCSMODE_MASK;
3226	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
3227 val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
3228 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
3229
3230 return 0;
3231}
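
/*
 * Illustrative note (not part of the driver): gop_xpcs_mode() maps the
 * lane count to an index as 1 -> 0, 2 -> 1, 4 -> 2, and programs the
 * LANEACTIVE field as 2 * lane. So for a 2-lane configuration the value
 * ORed into the register is:
 *
 *	u32 field = (2 * 1) << MVPP22_XPCS_LANEACTIVE_OFFS;
 *
 * which is exactly what the switch statement above produces for
 * num_of_lanes == 2.
 */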

static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}

/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}
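
/*
 * Illustrative usage sketch (not part of the driver): on PPv2.2 the
 * bring-up path configures the GOP first and only then enables the
 * port, roughly:
 *
 *	if (gop_port_init(port))
 *		return -EINVAL;
 *	gop_port_enable(port, 1);
 *
 * gop_port_enable() is defined below; the error handling shown here is
 * an assumption for the sketch.
 */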

static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);
		break;

	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}
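
/*
 * Illustrative sketch (not part of the driver): for a board with GoP
 * port 2 on SGMII and port 3 on RGMII, the accumulated configuration
 * would be:
 *
 *	u32 cfg = 0;
 *
 *	cfg |= mvpp2_netc_cfg_create(2, PHY_INTERFACE_MODE_SGMII);
 *	cfg |= mvpp2_netc_cfg_create(3, PHY_INTERFACE_MODE_RGMII);
 *	// cfg == MV_NETC_GE_MAC2_SGMII | MV_NETC_GE_MAC3_RGMII
 *
 * The per-port results are ORed into priv->netc_config, which
 * gop_netc_init() below consumes.
 */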

static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}
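
/*
 * All of the gop_netc_*() setters above follow the same read-modify-write
 * pattern on an RFU1 register. A hypothetical helper factoring it out
 * (names are illustrative, not part of the driver) could look like:
 *
 *	static void gop_rfu1_modify(struct mvpp2 *priv, u32 offset,
 *				    u32 mask, u32 shift, u32 val)
 *	{
 *		u32 reg = gop_rfu1_read(priv, offset);
 *
 *		reg &= ~mask;
 *		reg |= (val << shift) & mask;
 *		gop_rfu1_write(priv, offset, reg);
 *	}
 *
 * e.g. gop_netc_bus_width_select(priv, 1) would become
 * gop_rfu1_modify(priv, NETCOMP_PORTS_CONTROL_0_REG,
 *		   NETC_BUS_WIDTH_SELECT_MASK, NETC_BUS_WIDTH_SELECT_OFFS, 1);
 */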

static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);
		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}
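
/*
 * Illustrative sketch (not part of the driver): gop_netc_init() is meant
 * to be called once per phase, with the port muxing done in the first
 * phase and the resets/clocks released in the second:
 *
 *	gop_netc_init(priv, MV_NETC_FIRST_PHASE);
 *	// ... per-port GoP configuration happens in between ...
 *	gop_netc_init(priv, MV_NETC_SECOND_PHASE);
 *
 * The two-phase split keeps the ports inactive until the common GOP
 * clock logic is enabled and its reset is de-asserted.
 */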

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
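
/*
 * Both mvpp2_egress_disable() above and mvpp2_txq_clean() below poll a
 * status register with an mdelay(1) backoff and a millisecond budget. A
 * hypothetical helper expressing that pattern (illustrative only) might
 * be:
 *
 *	static int mvpp2_poll_cleared(struct mvpp2 *priv, u32 reg, u32 mask,
 *				      int timeout_ms)
 *	{
 *		while (mvpp2_read(priv, reg) & mask) {
 *			if (timeout_ms-- <= 0)
 *				return -ETIMEDOUT;
 *			mdelay(1);
 *		}
 *		return 0;
 *	}
 *
 * with MVPP2_TXP_SCHED_Q_CMD_REG / MVPP2_TXP_SCHED_ENQ_MASK and
 * MVPP2_TX_DISABLE_TIMEOUT_MSEC as the arguments matching the loop above.
 */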

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
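
/*
 * Illustrative sketch (not part of the driver): the status-update
 * register packs both counts into one write, the used count in the low
 * bits and the free count shifted by MVPP2_RXQ_NUM_NEW_OFFSET. After
 * processing e.g. 4 received packets and refilling 4 buffers:
 *
 *	mvpp2_rxq_status_update(port, rxq->id, 4, 4);
 *
 * performs a single write of (4 | (4 << MVPP2_RXQ_NUM_NEW_OFFSET)).
 */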

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
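
/*
 * Illustrative sketch (not part of the driver): the cookie built above
 * packs the BM pool id and the CPU id into one u32. A hypothetical
 * decode helper mirroring the encoding would be:
 *
 *	static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
 *	{
 *		return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
 *	}
 *
 * which recovers the pool index that mvpp2_pool_refill() needs when a
 * buffer is returned.
 */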

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
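
/*
 * Worked example (illustrative): with a packet size of 1536 bytes the
 * token-bucket MTU above becomes
 *
 *	mtu = 1536 * 8;			// 12288
 *	mtu = min(mtu, MVPP2_TXP_MTU_MAX);
 *	mtu = 3 * mtu;			// workaround factor
 *
 * and the TXP/TXQ token sizes are then raised to at least that value,
 * so the scheduler can always accumulate enough tokens for a full
 * frame.
 */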

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			  MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}
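
/*
 * Illustrative note (not part of the driver): on PPv2.2 the descriptor
 * ring lives in a DMA space wider than the 32-bit address register, so
 * the base is stored pre-shifted by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS.
 * Assuming the shifted-out low bits of the base are zero, the
 * round-trip is lossless:
 *
 *	u32 reg = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
 *	dma_addr_t back = (dma_addr_t)reg << MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
 *	// back == aggr_txq->descs_dma given sufficient alignment
 *
 * which is why the descriptor area must be suitably aligned.
 */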

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}
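
/*
 * Worked example (illustrative): with desc_per_txq = 16 and
 * MVPP2_MAX_TXQ = 8, the prefetch-buffer base computed above for
 * port 1, logical queue 0 is
 *
 *	desc = (1 * 8 * 16) + (0 * 16);	// = 128
 *
 * so each port gets a disjoint 8 * 16 = 128 descriptor window and each
 * of its queues a 16-descriptor slice inside it.
 */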

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Refill BM pool with the used buffer (U-Boot reuses buffers rather
 * than allocating new ones)
 */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}

static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
					      dev, port->phy_interface);

		/*
		 * If the PHY doesn't match any existing U-Boot driver, the
		 * PHY framework connects it to the generic driver, whose
		 * uid is 0xffffffff. In that case act as if the PHY had not
		 * been declared in the DT; otherwise, for the 3310 (for
		 * which no driver exists) the link would not be detected
		 * correctly. Removing the PHY entry from the DT is not an
		 * option, since it is required for the phy_fw_down
		 * procedure.
		 */
		if (phy_dev &&
		    phy_dev->drv->uid == 0xffffffff) {/* Generic phy */
			netdev_warn(port->dev,
				    "Marking phy as invalid, link will not be checked\n");
			/* Set phy_addr to an invalid value */
			port->phyaddr = PHY_MAX_ADDR;
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);

			return;
		}

		port->phy_dev = phy_dev;
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->link = 0;
		port->duplex = 0;
		port->speed = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link)
			printf("%s: No link\n", phy_dev->dev->name);
		else
			port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}
}

static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phyaddr < PHY_MAX_ADDR) {
		mvpp2_phy_connect(dev, port);
		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}

static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node;
	u32 id;
	int phyaddr = 0;
	int phy_mode = -1;
	int ret;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		int parent;

		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
		if (phyaddr < 0) {
			dev_err(dev, "could not find phy address\n");
			return -1;
		}
		parent = fdt_parent_offset(gd->fdt_blob, phy_node);
		ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
						     &port->mdio_dev);
		if (ret)
			return ret;
	} else {
		/* phy_addr is set to invalid value */
		phyaddr = PHY_MAX_ADDR;
	}

	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
	if (phy_mode_str)
		phy_mode = phy_get_interface_by_name(phy_mode_str);
	if (phy_mode == -1) {
		dev_err(dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(dev, "missing port-id value\n");
		return -EINVAL;
	}

#if CONFIG_IS_ENABLED(DM_GPIO)
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &port->phy_reset_gpio, GPIOD_IS_OUT);
	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
#endif

	/*
	 * ToDo:
	 * Not sure if this DT property "phy-speed" will get accepted, so
	 * this might change later
	 */
	/* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */
	port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node,
					 "phy-speed", 1000);

	port->id = id;
	if (port->priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * port->priv->max_port_rxqs;
	port->phy_interface = phy_mode;
	port->phyaddr = phyaddr;

	return 0;
}

#if CONFIG_IS_ENABLED(DM_GPIO)
/* Port GPIO initialization */
static void mvpp2_gpio_init(struct mvpp2_port *port)
{
	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
		dm_gpio_set_value(&port->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&port->phy_reset_gpio, 0);
	}

	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
}
#endif

/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#if CONFIG_IS_ENABLED(DM_GPIO)
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
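
/*
 * Worked example (illustrative): for a single DRAM chip-select with
 * base 0x0 and size 1 GiB, the loop above programs
 *
 *	MVPP2_WIN_BASE(0) = (0x00000000 & 0xffff0000) | attrs | target;
 *	MVPP2_WIN_SIZE(0) = (0x40000000 - 1) & 0xffff0000;	// 0x3fff0000
 *
 * i.e. the size register holds the highest 64 KiB-aligned offset inside
 * the window, and bit 0 of MVPP2_BASE_ADDR_ENABLE then enables it.
 */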

/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFO's */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
4964
/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		mvpp2_axi_init(priv);
	} else {
		/* MBUS windows configuration */
		dram_target_info = mvebu_mbus_dram_info();
		if (dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

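	/*
	 * Note: the aggregated TXQs are per-CPU objects in the Linux
	 * version of this driver. U-Boot runs single-threaded, so
	 * effectively a single aggregated queue is allocated and used
	 * below.
	 */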
	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

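/*
 * Receive path. U-Boot polls a single RX queue and handles one descriptor
 * per call: the frame stays in its buffer-manager buffer (an uncached
 * region, so no cache invalidation is needed), the buffer is handed to the
 * network stack without copying, and the pool is refilled right away.
 */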
static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get the number of received packets; only one is processed per call */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * carried in the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Pass the packet to the stack, skipping the 2-byte Marvell
	 * header and the 32 bytes of padding in front of the payload
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

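/*
 * Transmit path. One descriptor is taken from the aggregated TXQ and given
 * the packet's DMA address, split into an aligned base plus an
 * in-descriptor offset, then flagged as both first and last descriptor of
 * the frame. The two polling loops below first wait for the descriptor to
 * drain from the aggregated queue into the physical TXQ, then for the
 * transmit-done count, so the caller may safely reuse the packet buffer on
 * return.
 */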
static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	/* Wait until the descriptor has moved to the physical TXQ */
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	/* Wait until the hardware reports the descriptor as transmitted */
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
		break;
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

static int mvpp2_write_hwaddr(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	return mvpp2_prs_update_mac_da(port, port->dev_addr);
}

/* Program the PHY address this port's SMI polling engine should use */
static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}

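/*
 * Rough layout of the single DMA-safe area carved out of bd_space below
 * (exact sizes depend on the MVPP2_* constants defined earlier):
 *
 *	bd_space --> aggregated TX descriptors (MVPP2_AGGR_TXQ_SIZE entries)
 *	         --> TX descriptors            (MVPP2_MAX_TXD entries)
 *	         --> RX descriptors            (MVPP2_MAX_RXD entries)
 *	         --> BM pool pointer arrays    (MVPP2_BM_POOLS_NUM pools)
 *	         --> RX buffers                (MVPP2_BM_LONG_BUF_NUM buffers)
 */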
static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA-safe by disabling the
	 * D-cache for it.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	return 0;
}

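/*
 * Per-port probe. The shared base controller is probed lazily from
 * whichever port happens to be probed first, so port ordering does not
 * matter here.
 */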
static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = priv;

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phyaddr < PHY_MAX_ADDR)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

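/*
 * Since the driver declares DM_FLAG_ACTIVE_DMA (see the driver struct
 * below), driver model calls this remove hook before the OS is booted,
 * giving the driver a chance to quiesce its DMA activity first.
 */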
/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start = mvpp2_start,
	.send = mvpp2_send,
	.recv = mvpp2_recv,
	.stop = mvpp2_stop,
	.write_hwaddr = mvpp2_write_hwaddr,
};

static struct driver mvpp2_driver = {
	.name = "mvpp2",
	.id = UCLASS_ETH,
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.ops = &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
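/*
 * For illustration only (node names and values are representative, not
 * board-exact), the bind code below expects a device tree fragment along
 * these lines: one base controller node with one child node per port.
 *
 *	cp0_ethernet: ethernet@0 {
 *		compatible = "marvell,armada-7k-pp22";
 *
 *		eth0: eth0 {
 *			port-id = <0>;
 *			gop-port-id = <0>;
 *		};
 *
 *		eth1: eth1 {
 *			port-id = <1>;
 *			gop-port-id = <2>;
 *		};
 *	};
 */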
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;
	int err;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		if (!name) {
			free(plat);
			return -ENOMEM;
		}
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		err = device_bind(parent, &mvpp2_driver, name, plat, subnode,
				  &dev);
		if (err)
			return err;
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name = "mvpp2_base",
	.id = UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind = mvpp2_base_bind,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};