// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * IPD helper functions.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>
#include <mach/cvmx-pip.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

/**
 * Allocate the packet and WQE FPA pools and set up the FPA hardware.
 *
 * @return Zero on success.
 */
int __cvmx_helper_ipd_setup_fpa_pools(void)
{
	cvmx_fpa_global_initialize();
	if (cvmx_ipd_cfg.packet_pool.buffer_count == 0)
		return 0;
	__cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.packet_pool.pool_num,
					  cvmx_ipd_cfg.packet_pool.buffer_size,
					  cvmx_ipd_cfg.packet_pool.buffer_count,
					  "Packet Buffers");
	if (cvmx_ipd_cfg.wqe_pool.buffer_count == 0)
		return 0;
	__cvmx_helper_initialize_fpa_pool(cvmx_ipd_cfg.wqe_pool.pool_num,
					  cvmx_ipd_cfg.wqe_pool.buffer_size,
					  cvmx_ipd_cfg.wqe_pool.buffer_count,
					  "WQE Buffers");
	return 0;
}
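
/*
 * Illustrative only: the pool parameters consumed above live in the global
 * cvmx_ipd_cfg structure and are expected to be filled in by board or
 * application setup code before packet I/O is initialized. A hypothetical
 * configuration (example values, not hardware requirements) might look like:
 *
 *	cvmx_ipd_cfg.packet_pool.pool_num     = 0;
 *	cvmx_ipd_cfg.packet_pool.buffer_size  = 2048;
 *	cvmx_ipd_cfg.packet_pool.buffer_count = 1024;
 *	cvmx_ipd_cfg.wqe_pool.pool_num        = 1;
 *	cvmx_ipd_cfg.wqe_pool.buffer_size     = 128;
 *	cvmx_ipd_cfg.wqe_pool.buffer_count    = 1024;
 *
 * As the checks above show, a buffer_count of zero causes the helper to
 * return early without configuring the remaining pool(s).
 */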

/**
 * @INTERNAL
 * Set up global IPD/PIP settings that are not related to a specific
 * interface or port. This must be called before IPD is enabled.
 *
 * @return Zero on success, negative on failure.
 */
int __cvmx_helper_ipd_global_setup(void)
{
	/* Setup the packet and wqe pools */
	__cvmx_helper_ipd_setup_fpa_pools();
	/* Setup the global packet input options */
	cvmx_ipd_config(cvmx_ipd_cfg.packet_pool.buffer_size / 8,
			cvmx_ipd_cfg.first_mbuf_skip / 8,
			cvmx_ipd_cfg.not_first_mbuf_skip / 8,
			/* The +8 is to account for the next ptr */
			(cvmx_ipd_cfg.first_mbuf_skip + 8) / 128,
			/* The +8 is to account for the next ptr */
			(cvmx_ipd_cfg.not_first_mbuf_skip + 8) / 128,
			cvmx_ipd_cfg.wqe_pool.pool_num,
			(cvmx_ipd_mode_t)(cvmx_ipd_cfg.cache_mode), 1);
	return 0;
}
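
/*
 * Worked example with hypothetical values: for a 2048-byte packet buffer,
 * first_mbuf_skip = 184 and not_first_mbuf_skip = 56, the call above passes
 * (integer division):
 *
 *	2048 / 8        = 256	buffer size in 8-byte words
 *	 184 / 8        = 23	first skip in 8-byte words
 *	  56 / 8        = 7	not-first skip in 8-byte words
 *	(184 + 8) / 128 = 1	first-buffer "back" value
 *	 (56 + 8) / 128 = 0	later-buffer "back" value
 *
 * The /128 divisor indicates the "back" values are in units of 128-byte
 * cache lines, and the +8 covers the next-pointer stored ahead of the packet
 * data, as the inline comments note. The byte values themselves are examples
 * only.
 */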

/**
 * Enable or disable FCS stripping for all the ports on an interface.
 *
 * @param xiface  interface (node + interface number) to configure
 * @param nports  number of ports on the interface
 * @param has_fcs 0 to disable, non-zero to enable
 *
 * @return Zero on success.
 */
static int cvmx_helper_fcs_op(int xiface, int nports, int has_fcs)
{
	u64 port_bit;
	int index;
	int pknd;
	union cvmx_pip_sub_pkind_fcsx pkind_fcsx;
	union cvmx_pip_prt_cfgx port_cfg;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
		return 0;
	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		cvmx_helper_pki_set_fcs_op(xi.node, xi.interface, nports,
					   has_fcs);
		return 0;
	}

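	/*
	 * Legacy (non-PKI) path: FCS handling is controlled per port-kind.
	 * Build a bitmask of the port-kinds used by this interface, then
	 * update PIP_SUB_PKIND_FCS and each port-kind's CRC check enable.
	 */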
	port_bit = 0;
	for (index = 0; index < nports; index++)
		port_bit |= ((u64)1 << cvmx_helper_get_pknd(xiface, index));

	pkind_fcsx.u64 = csr_rd(CVMX_PIP_SUB_PKIND_FCSX(0));
	if (has_fcs)
		pkind_fcsx.s.port_bit |= port_bit;
	else
		pkind_fcsx.s.port_bit &= ~port_bit;
	csr_wr(CVMX_PIP_SUB_PKIND_FCSX(0), pkind_fcsx.u64);

	for (pknd = 0; pknd < 64; pknd++) {
		if ((1ull << pknd) & port_bit) {
			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
			port_cfg.s.crc_en = (has_fcs) ? 1 : 0;
			csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
		}
	}

	return 0;
}

/**
 * @INTERNAL
 * Configure the IPD/PIP tagging and QoS options for a specific
 * port. This function determines the POW work queue entry
 * contents for a port. The setup performed here is controlled by
 * the cvmx_ipd_cfg port configuration.
 *
 * @param ipd_port Port/port kind to configure. This follows the IPD
 *                 numbering, not the per-interface numbering.
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_ipd_port_setup(int ipd_port)
{
	union cvmx_pip_prt_cfgx port_config;
	union cvmx_pip_prt_tagx tag_config;

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		int xiface, index, pknd;
		union cvmx_pip_prt_cfgbx prt_cfgbx;

		xiface = cvmx_helper_get_interface_num(ipd_port);
		index = cvmx_helper_get_interface_index_num(ipd_port);
		pknd = cvmx_helper_get_pknd(xiface, index);

		port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
		tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(pknd));

		port_config.s.qos = pknd & 0x7;

		/* Default BPID to use for packets on this port-kind */
		prt_cfgbx.u64 = csr_rd(CVMX_PIP_PRT_CFGBX(pknd));
		prt_cfgbx.s.bpid = pknd;
		csr_wr(CVMX_PIP_PRT_CFGBX(pknd), prt_cfgbx.u64);
	} else {
		port_config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
		tag_config.u64 = csr_rd(CVMX_PIP_PRT_TAGX(ipd_port));

		/* Have each port go to a different POW queue */
		port_config.s.qos = ipd_port & 0x7;
	}

	/* Process the headers and place the IP header in the work queue */
	port_config.s.mode =
		(cvmx_pip_port_parse_mode_t)cvmx_ipd_cfg.port_config.parse_mode;

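	/*
	 * Select which packet fields contribute to the POW/SSO tag for this
	 * port; the flags come from the application-supplied
	 * cvmx_ipd_cfg.port_config.tag_fields settings.
	 */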
	tag_config.s.ip6_src_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_ip;
	tag_config.s.ip6_dst_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_ip;
	tag_config.s.ip6_sprt_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv6_src_port;
	tag_config.s.ip6_dprt_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv6_dst_port;
	tag_config.s.ip6_nxth_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv6_next_header;
	tag_config.s.ip4_src_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_ip;
	tag_config.s.ip4_dst_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_ip;
	tag_config.s.ip4_sprt_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv4_src_port;
	tag_config.s.ip4_dprt_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv4_dst_port;
	tag_config.s.ip4_pctl_flag =
		cvmx_ipd_cfg.port_config.tag_fields.ipv4_protocol;
	tag_config.s.inc_prt_flag =
		cvmx_ipd_cfg.port_config.tag_fields.input_port;
	tag_config.s.tcp6_tag_type =
		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
	tag_config.s.tcp4_tag_type =
		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
	tag_config.s.ip6_tag_type =
		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
	tag_config.s.ip4_tag_type =
		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;
	tag_config.s.non_tag_type =
		(cvmx_pow_tag_type_t)cvmx_ipd_cfg.port_config.tag_type;

	/* Put all packets in group 0. Other groups can be used by the app */
	tag_config.s.grp = 0;

	cvmx_pip_config_port(ipd_port, port_config, tag_config);

	/* Give the user a chance to override our setting for each port */
	if (cvmx_override_ipd_port_setup)
		cvmx_override_ipd_port_setup(ipd_port);

	return 0;
}
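
/*
 * Illustrative only: an application needing different per-port settings can
 * install the cvmx_override_ipd_port_setup hook tested and called above.
 * Assuming the hook is a function pointer that takes the IPD port number and
 * returns void (consistent with its use above), installation would look like:
 *
 *	static void my_ipd_port_setup(int ipd_port)
 *	{
 *		// adjust PIP_PRT_CFG/PIP_PRT_TAG for this port here
 *	}
 *
 *	cvmx_override_ipd_port_setup = my_ipd_port_setup;
 *
 * The hook name comes from this file; the handler body and the place where
 * it is assigned are hypothetical.
 */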

/**
 * @INTERNAL
 * Setup the IPD/PIP for the ports on an interface. Packet
 * classification and tagging are set for every port on the
 * interface. The number of ports on the interface must already
 * have been probed.
 *
 * @param xiface interface to set up IPD/PIP for
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_ipd_setup_interface(int xiface)
{
	cvmx_helper_interface_mode_t mode;
	int num_ports = cvmx_helper_ports_on_interface(xiface);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int ipd_port = cvmx_helper_get_ipd_port(xiface, 0);
	int delta;

	if (num_ports == CVMX_HELPER_CFG_INVALID_VALUE)
		return 0;

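	/*
	 * Determine the IPD port stride for this interface: on chips with
	 * port kinds (OCTEON_FEATURE_PKND), IPD port numbers on the GMX
	 * interfaces are spaced 16 apart; otherwise ports are numbered
	 * consecutively.
	 */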
	delta = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		if (xi.interface < CVMX_HELPER_MAX_GMX)
			delta = 16;
	}

	while (num_ports--) {
		if (!cvmx_helper_is_port_valid(xiface, num_ports))
			continue;
		if (octeon_has_feature(OCTEON_FEATURE_PKI))
			__cvmx_helper_pki_port_setup(xi.node, ipd_port);
		else
			__cvmx_helper_ipd_port_setup(ipd_port);
		ipd_port += delta;
	}
	/* FCS settings */
	cvmx_helper_fcs_op(xiface, cvmx_helper_ports_on_interface(xiface),
			   __cvmx_helper_get_has_fcs(xiface));

	mode = cvmx_helper_interface_get_mode(xiface);

	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
		__cvmx_helper_loop_enable(xiface);

	return 0;
}