// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * PKI helper functions.
 */

#include <time.h>
#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-sli-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-pki.h>

#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pip.h>

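/* Set to a non-zero value to enable debug output from these PKI helpers. */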
static int pki_helper_debug;

bool cvmx_pki_dflt_init[CVMX_MAX_NODES] = { [0 ... CVMX_MAX_NODES - 1] = 1 };

static bool cvmx_pki_dflt_bp_en[CVMX_MAX_NODES] = {
	[0 ... CVMX_MAX_NODES - 1] = true
};

static struct cvmx_pki_cluster_grp_config pki_dflt_clgrp[CVMX_MAX_NODES] = {
	{ 0, 0xf },
	{ 0, 0xf }
};

struct cvmx_pki_pool_config pki_dflt_pool[CVMX_MAX_NODES] = {
	[0 ... CVMX_MAX_NODES - 1] = { .pool_num = -1,
				       .buffer_size = 2048,
				       .buffer_count = 0 }
};

struct cvmx_pki_aura_config pki_dflt_aura[CVMX_MAX_NODES] = {
	[0 ... CVMX_MAX_NODES - 1] = { .aura_num = 0,
				       .pool_num = -1,
				       .buffer_count = 0 }
};

struct cvmx_pki_style_config pki_dflt_style[CVMX_MAX_NODES] = {
	[0 ... CVMX_MAX_NODES - 1] = { .parm_cfg = { .lenerr_en = 1,
						     .maxerr_en = 1,
						     .minerr_en = 1,
						     .fcs_strip = 1,
						     .fcs_chk = 1,
						     .first_skip = 40,
						     .mbuff_size = 2048 } }
};

struct cvmx_pki_sso_grp_config pki_dflt_sso_grp[CVMX_MAX_NODES];
struct cvmx_pki_qpg_config pki_dflt_qpg[CVMX_MAX_NODES];
struct cvmx_pki_pkind_config pki_dflt_pkind[CVMX_MAX_NODES];
u64 pkind_style_map[CVMX_MAX_NODES][CVMX_PKI_NUM_PKIND] = {
	[0 ... CVMX_MAX_NODES - 1] = {
		0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63
	}
};

/* Storage for the QoS watcher values before they are written to the PCAM when
 * a watcher is enabled. There is no cvmx-pip.c file, so they ended up here.
 */
struct cvmx_pki_legacy_qos_watcher qos_watcher[8];

/** @INTERNAL
 * This function sets up the default ltype map.
 *
 * @param node node number
 */
void __cvmx_helper_pki_set_dflt_ltype_map(int node)
{
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NONE,
				 CVMX_PKI_BELTYPE_NONE);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ENET,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_VLAN,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SNAP_PAYLD,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_ARP,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_RARP,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4,
				 CVMX_PKI_BELTYPE_IP4);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP4_OPT,
				 CVMX_PKI_BELTYPE_IP4);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6,
				 CVMX_PKI_BELTYPE_IP6);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IP6_OPT,
				 CVMX_PKI_BELTYPE_IP6);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPSEC_ESP,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPFRAG,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_IPCOMP,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_TCP,
				 CVMX_PKI_BELTYPE_TCP);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP,
				 CVMX_PKI_BELTYPE_UDP);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SCTP,
				 CVMX_PKI_BELTYPE_SCTP);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_UDP_VXLAN,
				 CVMX_PKI_BELTYPE_UDP);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GRE,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_NVGRE,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_GTP,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW28,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW29,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW30,
				 CVMX_PKI_BELTYPE_MISC);
	cvmx_pki_write_ltype_map(node, CVMX_PKI_LTYPE_E_SW31,
				 CVMX_PKI_BELTYPE_MISC);
}

/** @INTERNAL
 * This function installs the default VLAN entries to identify
 * the VLAN and set WQE[vv], WQE[vs] if a VLAN is found. On 78XX
 * the hardware (PKI) is not hardwired to recognize any 802.1Q
 * VLAN Ethertypes.
 *
 * @param node node number
 *
 * @return 0 on success, -1 on failure
 */
int __cvmx_helper_pki_install_dflt_vlan(int node)
{
	struct cvmx_pki_pcam_input pcam_input;
	struct cvmx_pki_pcam_action pcam_action;
	enum cvmx_pki_term field;
	int index;
	int bank;
	u64 cl_mask = CVMX_PKI_CLUSTER_ALL;

	memset(&pcam_input, 0, sizeof(pcam_input));
	memset(&pcam_action, 0, sizeof(pcam_action));

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
		/* PKI-20858 */
		int i;

		for (i = 0; i < 4; i++) {
			union cvmx_pki_clx_ecc_ctl ecc_ctl;

			ecc_ctl.u64 =
				csr_rd_node(node, CVMX_PKI_CLX_ECC_CTL(i));
			ecc_ctl.s.pcam_en = 0;
			ecc_ctl.s.pcam0_cdis = 1;
			ecc_ctl.s.pcam1_cdis = 1;
			csr_wr_node(node, CVMX_PKI_CLX_ECC_CTL(i), ecc_ctl.u64);
		}
	}

	for (field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
	     field < CVMX_PKI_PCAM_TERM_ETHTYPE2; field++) {
		bank = field & 0x01;

		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.style = 0;
		pcam_input.style_mask = 0;
		pcam_input.field = field;
		pcam_input.field_mask = 0xfd;
		pcam_input.data = 0x81000000;
		pcam_input.data_mask = 0xffff0000;
		pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
		pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_VLAN;
		pcam_action.style_add = 0;
		pcam_action.pointer_advance = 4;
		cvmx_pki_pcam_write_entry(
			node, index, cl_mask, pcam_input,
			pcam_action); /* cluster_mask in pass2 */

		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.data = 0x88a80000;
		cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
					  pcam_action);

		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.data = 0x92000000;
		cvmx_pki_pcam_write_entry(
			node, index, cl_mask, pcam_input,
			pcam_action); /* cluster_mask in pass2 */

		index = cvmx_pki_pcam_entry_alloc(
			node, CVMX_PKI_FIND_AVAL_ENTRY, bank, cl_mask);
		if (index < 0) {
			debug("ERROR: Allocating pcam entry node=%d bank=%d\n",
			      node, bank);
			return -1;
		}
		pcam_input.data = 0x91000000;
		cvmx_pki_pcam_write_entry(node, index, cl_mask, pcam_input,
					  pcam_action);
	}
	return 0;
}

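/** @INTERNAL
 * This function allocates the default PKI cluster group for the node and
 * attaches the configured cluster mask to it. If the group was already
 * reserved, it is simply shared.
 *
 * @param node node number
 *
 * @return 0 on success, -1 on failure
 */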
static int __cvmx_helper_setup_pki_cluster_groups(int node)
{
	u64 cl_mask;
	int cl_group;

	cl_group =
		cvmx_pki_cluster_grp_alloc(node, pki_dflt_clgrp[node].grp_num);
	if (cl_group == CVMX_RESOURCE_ALLOC_FAILED)
		return -1;
	else if (cl_group == CVMX_RESOURCE_ALREADY_RESERVED) {
		if (pki_dflt_clgrp[node].grp_num == -1)
			return -1;
		else
			return 0; /* cluster already configured, share it */
	}
	cl_mask = pki_dflt_clgrp[node].cluster_mask;
	if (pki_helper_debug)
		debug("pki-helper: setup pki cluster grp %d with cl_mask 0x%llx\n",
		      (int)cl_group, (unsigned long long)cl_mask);
	cvmx_pki_attach_cluster_to_group(node, cl_group, cl_mask);
	return 0;
}

/**
 * This function sets up the pools/auras to be used by PKI.
 *
 * @param node node number
 *
 * @return 0 on success, -1 on failure
 */
static int __cvmx_helper_pki_setup_fpa_pools(int node)
{
	u64 buffer_count;
	u64 buffer_size;

	if (__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura))
		return 0; /* aura already configured, share it */

	buffer_count = pki_dflt_pool[node].buffer_count;
	buffer_size = pki_dflt_pool[node].buffer_size;

	if (buffer_count != 0) {
		pki_dflt_pool[node].pool = cvmx_fpa3_setup_fill_pool(
			node, pki_dflt_pool[node].pool_num, "PKI POOL DFLT",
			buffer_size, buffer_count, NULL);
		if (!__cvmx_fpa3_pool_valid(pki_dflt_pool[node].pool)) {
			cvmx_printf("ERROR: %s: Failed to allocate pool %d\n",
				    __func__, pki_dflt_pool[node].pool_num);
			return -1;
		}
		pki_dflt_pool[node].pool_num = pki_dflt_pool[node].pool.lpool;

		if (pki_helper_debug)
			debug("%s pool %d with buffer size %d cnt %d\n",
			      __func__, pki_dflt_pool[node].pool_num,
			      (int)buffer_size, (int)buffer_count);

		pki_dflt_aura[node].pool_num = pki_dflt_pool[node].pool_num;
		pki_dflt_aura[node].pool = pki_dflt_pool[node].pool;
	}

	buffer_count = pki_dflt_aura[node].buffer_count;

	if (buffer_count != 0) {
		pki_dflt_aura[node].aura = cvmx_fpa3_set_aura_for_pool(
			pki_dflt_aura[node].pool, pki_dflt_aura[node].aura_num,
			"PKI DFLT AURA", buffer_size, buffer_count);

		if (!__cvmx_fpa3_aura_valid(pki_dflt_aura[node].aura)) {
			debug("ERROR: %s: Failed to allocate aura %d\n",
			      __func__, pki_dflt_aura[node].aura_num);
			return -1;
		}
	}
	return 0;
}

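/** @INTERNAL
 * This function reserves the default QPG table entry for the node and
 * writes the default QPG configuration to it. If the entry was already
 * reserved, it is simply shared.
 *
 * @param node node number
 *
 * @return 0 on success, -1 on failure
 */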
static int __cvmx_helper_setup_pki_qpg_table(int node)
{
	int offset;

	offset = cvmx_pki_qpg_entry_alloc(node, pki_dflt_qpg[node].qpg_base, 1);
	if (offset == CVMX_RESOURCE_ALLOC_FAILED)
		return -1;
	else if (offset == CVMX_RESOURCE_ALREADY_RESERVED)
		return 0; /* share the qpg table entry */
	if (pki_helper_debug)
		debug("pki-helper: set qpg entry at offset %d with port add %d aura %d grp_ok %d grp_bad %d\n",
		      offset, pki_dflt_qpg[node].port_add,
		      pki_dflt_qpg[node].aura_num, pki_dflt_qpg[node].grp_ok,
		      pki_dflt_qpg[node].grp_bad);
	cvmx_pki_write_qpg_entry(node, offset, &pki_dflt_qpg[node]);
	return 0;
}

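/** @INTERNAL
 * This function sets up PKI for a single port: it reserves the style mapped
 * to the port's pkind, configures that style with the default parameters if
 * it was not configured already, and writes the pkind configuration with
 * that initial style.
 *
 * @param node node number
 * @param ipd_port IPD port number to set up
 *
 * @return 0 on success, -1 on failure
 */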
int __cvmx_helper_pki_port_setup(int node, int ipd_port)
{
	int xiface, index;
	int pknd, style_num;
	int rs;
	struct cvmx_pki_pkind_config pkind_cfg;

	if (!cvmx_pki_dflt_init[node])
		return 0;
	xiface = cvmx_helper_get_interface_num(ipd_port);
	index = cvmx_helper_get_interface_index_num(ipd_port);

	pknd = cvmx_helper_get_pknd(xiface, index);
	style_num = pkind_style_map[node][pknd];

	/* Try to reserve the style; if it is not configured already, reserve
	 * and configure it.
	 */
	rs = cvmx_pki_style_alloc(node, style_num);
	if (rs < 0) {
		if (rs == CVMX_RESOURCE_ALLOC_FAILED)
			return -1;
	} else {
		if (pki_helper_debug)
			debug("pki-helper: set style %d with default parameters\n",
			      style_num);
		pkind_style_map[node][pknd] = style_num;
		/* configure style with default parameters */
		cvmx_pki_write_style_config(node, style_num,
					    CVMX_PKI_CLUSTER_ALL,
					    &pki_dflt_style[node]);
	}
	if (pki_helper_debug)
		debug("pki-helper: set pkind %d with initial style %d\n", pknd,
		      style_num);
	/* write pkind configuration */
	pkind_cfg = pki_dflt_pkind[node];
	pkind_cfg.initial_style = style_num;
	cvmx_pki_write_pkind_config(node, pknd, &pkind_cfg);
	return 0;
}

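/** @INTERNAL
 * This function performs the global PKI setup for the node: the default
 * ltype map, the default packet pools/auras, the default cluster group and
 * the default QPG table entry.
 *
 * @param node node number
 *
 * @return 0 on success
 */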
int __cvmx_helper_pki_global_setup(int node)
{
	__cvmx_helper_pki_set_dflt_ltype_map(node);
	if (!cvmx_pki_dflt_init[node])
		return 0;
	/* Set up the packet pools */
	__cvmx_helper_pki_setup_fpa_pools(node);
	/* Set up the default cluster */
	__cvmx_helper_setup_pki_cluster_groups(node);
	//__cvmx_helper_pki_setup_sso_groups(node);
	__cvmx_helper_setup_pki_qpg_table(node);
	/*
	 * Errata PKI-19103: backward compatibility uses only one aura,
	 * so there is no head-of-line blocking.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
		cvmx_pki_buf_ctl_t buf_ctl;

		buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
		buf_ctl.s.fpa_wait = 1;
		csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
	}
	return 0;
}

/**
 * This function enables the PKI hardware to
 * start accepting/processing packets.
 *
 * @param node node number
 */
void cvmx_helper_pki_enable(int node)
{
	if (pki_helper_debug)
		debug("enable PKI on node %d\n", node);
	__cvmx_helper_pki_install_dflt_vlan(node);
	cvmx_pki_setup_clusters(node);
	if (cvmx_pki_dflt_bp_en[node])
		cvmx_pki_enable_backpressure(node);
	cvmx_pki_parse_enable(node, 0);
	cvmx_pki_enable(node);
}

/**
 * This function sets up the QPG table by allocating a QPG entry and writing
 * the provided parameters to that entry (offset).
 *
 * @param node node number.
 * @param qpg_cfg pointer to struct containing the qpg configuration
 *
 * @return the allocated offset on success, or CVMX_RESOURCE_ALLOC_FAILED /
 *         CVMX_RESOURCE_ALREADY_RESERVED on failure.
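 *
 * Example (illustrative sketch, not taken from this file; the zero group
 * and aura values and the use of qpg_base = -1 to request any free entry
 * are assumptions):
 *
 *	struct cvmx_pki_qpg_config qpg;
 *	int offset;
 *
 *	memset(&qpg, 0, sizeof(qpg));
 *	qpg.qpg_base = -1;
 *	qpg.port_add = 0;
 *	qpg.aura_num = 0;
 *	qpg.grp_ok = 0;
 *	qpg.grp_bad = 0;
 *	offset = cvmx_helper_pki_set_qpg_entry(node, &qpg);
 *	if (offset == CVMX_RESOURCE_ALLOC_FAILED ||
 *	    offset == CVMX_RESOURCE_ALREADY_RESERVED)
 *		return -1;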
 */
int cvmx_helper_pki_set_qpg_entry(int node, struct cvmx_pki_qpg_config *qpg_cfg)
{
	int offset;

	offset = cvmx_pki_qpg_entry_alloc(node, qpg_cfg->qpg_base, 1);
	if (pki_helper_debug)
		debug("pki-helper:set qpg entry at offset %d\n", offset);
	if (offset == CVMX_RESOURCE_ALREADY_RESERVED) {
		debug("INFO:setup_qpg_table: offset %d already reserved\n",
		      qpg_cfg->qpg_base);
		return CVMX_RESOURCE_ALREADY_RESERVED;
	} else if (offset == CVMX_RESOURCE_ALLOC_FAILED) {
		debug("ERROR:setup_qpg_table: no more entries available\n");
		return CVMX_RESOURCE_ALLOC_FAILED;
	}
	qpg_cfg->qpg_base = offset;
	cvmx_pki_write_qpg_entry(node, offset, qpg_cfg);
	return offset;
}

/**
 * This function gets all the PKI parameters related to a
 * particular port from the hardware.
 *
 * @param xipd_port xipd port number (with node) to get the parameters of
 * @param port_cfg pointer to the structure where the read parameters are stored
 */
void cvmx_pki_get_port_config(int xipd_port,
			      struct cvmx_pki_port_config *port_cfg)
{
	int xiface, index, pknd;
	int style, cl_mask;
	cvmx_pki_icgx_cfg_t pki_cl_msk;
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);

	/* get the pkind used by this ipd port */
	xiface = cvmx_helper_get_interface_num(xipd_port);
	index = cvmx_helper_get_interface_index_num(xipd_port);
	pknd = cvmx_helper_get_pknd(xiface, index);

	cvmx_pki_read_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg);
	style = port_cfg->pkind_cfg.initial_style;
	pki_cl_msk.u64 = csr_rd_node(
		xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
	cl_mask = pki_cl_msk.s.clusters;
	cvmx_pki_read_style_config(xp.node, style, cl_mask,
				   &port_cfg->style_cfg);
}

/**
 * This function sets all the PKI parameters related to a
 * particular port in the hardware.
 *
 * @param xipd_port xipd port number (with node) to set the parameters of
 * @param port_cfg pointer to the structure containing the port parameters
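 *
 * Example of a read-modify-write sequence (illustrative sketch; which
 * style parameter is changed here is only an assumption for the example):
 *
 *	struct cvmx_pki_port_config port_cfg;
 *
 *	cvmx_pki_get_port_config(xipd_port, &port_cfg);
 *	port_cfg.style_cfg.parm_cfg.lenerr_en = 0;
 *	cvmx_pki_set_port_config(xipd_port, &port_cfg);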
 */
void cvmx_pki_set_port_config(int xipd_port,
			      struct cvmx_pki_port_config *port_cfg)
{
	int xiface, index, pknd;
	int style, cl_mask;
	cvmx_pki_icgx_cfg_t pki_cl_msk;
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(xipd_port);

	/* get the pkind used by this ipd port */
	xiface = cvmx_helper_get_interface_num(xipd_port);
	index = cvmx_helper_get_interface_index_num(xipd_port);
	pknd = cvmx_helper_get_pknd(xiface, index);

	if (cvmx_pki_write_pkind_config(xp.node, pknd, &port_cfg->pkind_cfg))
		return;
	style = port_cfg->pkind_cfg.initial_style;
	pki_cl_msk.u64 = csr_rd_node(
		xp.node, CVMX_PKI_ICGX_CFG(port_cfg->pkind_cfg.cluster_grp));
	cl_mask = pki_cl_msk.s.clusters;
	cvmx_pki_write_style_config(xp.node, style, cl_mask,
				    &port_cfg->style_cfg);
}

/**
 * This function sets up all the ports of a particular interface
 * for the chosen FCS mode (only used for backward compatibility).
 * New applications can control this via the init_interface calls.
 *
 * @param node node number.
 * @param interface interface number.
 * @param nports number of ports
 * @param has_fcs 1 -- enable fcs check and fcs strip.
 *                0 -- disable fcs check.
 */
void cvmx_helper_pki_set_fcs_op(int node, int interface, int nports,
				int has_fcs)
{
	int xiface, index;
	int pknd;
	unsigned int cluster = 0;
	cvmx_pki_clx_pkindx_cfg_t pkind_cfg;

	xiface = cvmx_helper_node_interface_to_xiface(node, interface);
	for (index = 0; index < nports; index++) {
		pknd = cvmx_helper_get_pknd(xiface, index);
		while (cluster < CVMX_PKI_NUM_CLUSTER) {
			/* find the cluster in use pass2 */
			pkind_cfg.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster));
			pkind_cfg.s.fcs_pres = has_fcs;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PKINDX_CFG(pknd, cluster),
				    pkind_cfg.u64);
			cluster++;
		}
		/* make sure fcs_strip and fcs_check are also enabled/disabled
		 * for the style used by that port
		 */
		cvmx_pki_endis_fcs_check(node, pknd, has_fcs, has_fcs);
		cluster = 0;
	}
549}