// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

static const int debug;

#define CVMX_DUMP_REGX(reg) \
	if (debug) \
		debug("%s=%#llx\n", #reg, (long long)csr_rd_node(node, reg))

static int cvmx_pko_setup_macs(int node);

/*
 * PKO descriptor queue operation error string
 *
 * @param dqstatus is the enumeration returned from hardware,
 *	PKO_QUERY_RTN_S[DQSTATUS].
 *
 * @return static constant string error description
 */
const char *pko_dqstatus_error(pko_query_dqstatus_t dqstatus)
{
	const char *str = "PKO Undefined error";

	switch (dqstatus) {
	case PKO_DQSTATUS_PASS:
		str = "No error";
		break;
	case PKO_DQSTATUS_BADSTATE:
		str = "PKO queue not ready";
		break;
	case PKO_DQSTATUS_NOFPABUF:
		str = "PKO failed to allocate buffer from FPA";
		break;
	case PKO_DQSTATUS_NOPKOBUF:
		str = "PKO out of buffers";
		break;
	case PKO_DQSTATUS_FAILRTNPTR:
		str = "PKO failed to return buffer to FPA";
		break;
	case PKO_DQSTATUS_ALREADY:
		str = "PKO queue already opened";
		break;
	case PKO_DQSTATUS_NOTCREATED:
		str = "PKO queue has not been created";
		break;
	case PKO_DQSTATUS_NOTEMPTY:
		str = "PKO queue is not empty";
		break;
	case PKO_DQSTATUS_SENDPKTDROP:
		str = "Illegal PKO command construct";
		break;
	}
	return str;
}

/*
 * PKO global initialization for 78XX.
 *
 * @param node is the node on which the PKO block is initialized.
 * @param aura is the FPA AURA, with the node encoded in its upper bits,
 *	from which PKO allocates its internal buffers.
 * @return 0 on success, -1 on failure.
 */
int cvmx_pko3_hw_init_global(int node, uint16_t aura)
{
	cvmx_pko_dpfi_flush_t pko_flush;
	cvmx_pko_dpfi_fpa_aura_t pko_aura;
	cvmx_pko_dpfi_ena_t dpfi_enable;
	cvmx_pko_ptf_iobp_cfg_t ptf_iobp_cfg;
	cvmx_pko_pdm_cfg_t pko_pdm_cfg;
	cvmx_pko_enable_t pko_enable;
	cvmx_pko_dpfi_status_t dpfi_status;
	cvmx_pko_status_t pko_status;
	cvmx_pko_shaper_cfg_t shaper_cfg;
	u64 cycles;
	const unsigned int timeout = 100;	/* 100 milliseconds */

	if (node != (aura >> 10))
		cvmx_printf("WARNING: AURA vs PKO node mismatch\n");

	pko_enable.u64 = csr_rd_node(node, CVMX_PKO_ENABLE);
	if (pko_enable.s.enable) {
		cvmx_printf("WARNING: %s: PKO already enabled on node %u\n",
			    __func__, node);
		return 0;
	}

	/* Enable color awareness */
	shaper_cfg.u64 = csr_rd_node(node, CVMX_PKO_SHAPER_CFG);
	shaper_cfg.s.color_aware = 1;
	csr_wr_node(node, CVMX_PKO_SHAPER_CFG, shaper_cfg.u64);

	/* Clear the FLUSH command to be sure */
	pko_flush.u64 = 0;
	pko_flush.s.flush_en = 0;
	csr_wr_node(node, CVMX_PKO_DPFI_FLUSH, pko_flush.u64);

	/* Set the AURA number in PKO; the node comes from the upper bits
	 * of the 'aura' parameter, the local AURA from the lower bits.
	 */
	pko_aura.u64 = 0;
	pko_aura.s.node = aura >> 10;
	pko_aura.s.laura = aura;
	csr_wr_node(node, CVMX_PKO_DPFI_FPA_AURA, pko_aura.u64);

	CVMX_DUMP_REGX(CVMX_PKO_DPFI_FPA_AURA);

	dpfi_enable.u64 = 0;
	dpfi_enable.s.enable = 1;
	csr_wr_node(node, CVMX_PKO_DPFI_ENA, dpfi_enable.u64);

	/* Prepare timeout */
	cycles = get_timer(0);

	/* Wait until all pointers have been returned */
	do {
		pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
		if (get_timer(cycles) > timeout)
			break;
	} while (!pko_status.s.pko_rdy);

	if (!pko_status.s.pko_rdy) {
		dpfi_status.u64 = csr_rd_node(node, CVMX_PKO_DPFI_STATUS);
		cvmx_printf("ERROR: %s: PKO DPFI failed, PKO_STATUS=%#llx DPFI_STATUS=%#llx\n",
			    __func__, (unsigned long long)pko_status.u64,
			    (unsigned long long)dpfi_status.u64);
		return -1;
	}

	/* Set max outstanding requests in IOBP for any FIFO */
	ptf_iobp_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTF_IOBP_CFG);
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		ptf_iobp_cfg.s.max_read_size = 0x10;	/* Recommended by HRM */
	else
		/* Reduce the value from the recommended 0x10 to avoid
		 * an "underflow" condition in the BGX TX FIFO.
		 */
		ptf_iobp_cfg.s.max_read_size = 3;
	csr_wr_node(node, CVMX_PKO_PTF_IOBP_CFG, ptf_iobp_cfg.u64);

	/* Set minimum packet size per Ethernet standard */
	pko_pdm_cfg.u64 = 0;
	pko_pdm_cfg.s.pko_pad_minlen = 0x3c;	/* 60 bytes before FCS */
	csr_wr_node(node, CVMX_PKO_PDM_CFG, pko_pdm_cfg.u64);

	/* Initialize MACs and FIFOs */
	cvmx_pko_setup_macs(node);

	/* Enable PKO, although interfaces and queues are not up yet */
	pko_enable.u64 = 0;
	pko_enable.s.enable = 1;
	csr_wr_node(node, CVMX_PKO_ENABLE, pko_enable.u64);

	/* PKO_RDY set indicates successful initialization */
	pko_status.u64 = csr_rd_node(node, CVMX_PKO_STATUS);
	if (pko_status.s.pko_rdy)
		return 0;

	cvmx_printf("ERROR: %s: failed, PKO_STATUS=%#llx\n", __func__,
		    (unsigned long long)pko_status.u64);
	return -1;
}

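/*
 * Example (illustrative sketch, not part of the driver): bringing up PKO
 * on node 0 with an AURA handle previously set up by the FPA code. The
 * 'aura' value is a hypothetical placeholder; per the check above, its
 * upper bits must encode the same node the PKO block lives on.
 *
 *	uint16_t aura = ...;	// AURA handle from FPA initialization
 *
 *	if (cvmx_pko3_hw_init_global(0, aura) != 0)
 *		printf("PKO global init failed on node 0\n");
 */
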
/*
 * Configure the channel credit level in PKO.
 *
 * @param node is the node to which this configuration is applied.
 * @param level specifies the level at which PKO channel queues
 *	will be configured.
 * @return 0 on success, -1 on failure.
 */
int cvmx_pko3_channel_credit_level(int node, enum cvmx_pko3_level_e level)
{
	union cvmx_pko_channel_level channel_level;

	channel_level.u64 = 0;

	if (level == CVMX_PKO_L2_QUEUES)
		channel_level.s.cc_level = 0;
	else if (level == CVMX_PKO_L3_QUEUES)
		channel_level.s.cc_level = 1;
	else
		return -1;

	csr_wr_node(node, CVMX_PKO_CHANNEL_LEVEL, channel_level.u64);

	return 0;
}

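/*
 * Example (illustrative sketch): selecting the L2 scheduler queues as the
 * channel credit level on node 0, before the PKO queue hierarchy is built:
 *
 *	if (cvmx_pko3_channel_credit_level(0, CVMX_PKO_L2_QUEUES) != 0)
 *		printf("invalid channel credit level\n");
 */
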
/** Open a configured descriptor queue before queueing packets into it.
 *
 * @param node is the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be opened.
 * @return 0 on success or -1 on failure.
 */
int cvmx_pko_dq_open(int node, int dq)
{
	cvmx_pko_query_rtn_t pko_status;
	pko_query_dqstatus_t dqstatus;
	cvmx_pko3_dq_params_t *p_param;

	if (debug)
		debug("%s: DEBUG: dq %u\n", __func__, dq);

	__cvmx_pko3_dq_param_setup(node);

	pko_status = __cvmx_pko3_do_dma(node, dq, NULL, 0, CVMX_PKO_DQ_OPEN);

	dqstatus = pko_status.s.dqstatus;

	if (dqstatus == PKO_DQSTATUS_ALREADY)
		return 0;
	if (dqstatus != PKO_DQSTATUS_PASS) {
		cvmx_printf("%s: ERROR: Failed to open dq %u: %s\n", __func__,
			    dq, pko_dqstatus_error(dqstatus));
		return -1;
	}

	/* Set up the descriptor queue software parameters */
	p_param = cvmx_pko3_dq_parameters(node, dq);
	if (p_param) {
		p_param->depth = pko_status.s.depth;
		if (p_param->limit == 0)
			p_param->limit = 1024;	/* last-resort default */
	}

	return 0;
}

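/*
 * Example (illustrative sketch): opening descriptor queue 0 on node 0 and
 * checking the result before queueing any packets to it. Opening an
 * already-open DQ is harmless, as the PKO_DQSTATUS_ALREADY case above shows.
 *
 *	if (cvmx_pko_dq_open(0, 0) != 0)
 *		printf("failed to open DQ 0\n");
 */
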
/*
 * PKO initialization of MACs and FIFOs
 *
 * All MACs are configured and assigned a specific FIFO,
 * and each FIFO is configured with a size for best utilization
 * of the available FIFO resources.
 *
 * @param node specifies which node's PKO block to set up.
 * @return 0 on success, -1 on failure.
 *
 * Note: This function contains model-specific code.
 */
static int cvmx_pko_setup_macs(int node)
{
	unsigned int interface;
	unsigned int port, num_ports;
	unsigned int mac_num, fifo, pri, cnt;
	cvmx_helper_interface_mode_t mode;
	const unsigned int num_interfaces =
		cvmx_helper_get_number_of_interfaces();
	u8 fifo_group_cfg[8];
	u8 fifo_group_spd[8];
	unsigned int fifo_count = 0;
	unsigned int max_fifos = 0, fifo_groups = 0;
	struct {
		u8 fifo_cnt;
		u8 fifo_id;
		u8 pri;
		u8 spd;
		u8 mac_fifo_cnt;
	} cvmx_pko3_mac_table[32];

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		max_fifos = 28;		/* exclusive of NULL FIFO */
		fifo_groups = 8;	/* inclusive of NULL PTGF */
	}
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		max_fifos = 16;
		fifo_groups = 5;
	}

	/* Initialize the FIFO allocation table */
	memset(&fifo_group_cfg, 0, sizeof(fifo_group_cfg));
	memset(&fifo_group_spd, 0, sizeof(fifo_group_spd));
	memset(cvmx_pko3_mac_table, 0, sizeof(cvmx_pko3_mac_table));

	/* Initialize all MACs as disabled */
	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
		cvmx_pko3_mac_table[mac_num].pri = 0;
		cvmx_pko3_mac_table[mac_num].fifo_cnt = 0;
		cvmx_pko3_mac_table[mac_num].fifo_id = 0x1f;
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface =
			cvmx_helper_node_interface_to_xiface(node, interface);

		/* Interface type for ALL interfaces */
		mode = cvmx_helper_interface_get_mode(xiface);
		num_ports = cvmx_helper_interface_enumerate(xiface);

		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
			continue;

		/*
		 * Non-BGX interfaces:
		 * Each of these interfaces really has a single MAC.
		 */
		if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
		    mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
		    mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
			num_ports = 1;

		for (port = 0; port < num_ports; port++) {
			int i;

			/* Get the per-port mode for BGX interfaces */
			if (interface < CVMX_HELPER_MAX_GMX)
				mode = cvmx_helper_bgx_get_mode(xiface, port);
			/* In MIXED mode, LMACs can run different protocols */

			/* Convert interface/port to a MAC number */
			i = __cvmx_pko3_get_mac_num(xiface, port);
			if (i < 0 || i >= (int)__cvmx_pko3_num_macs()) {
				cvmx_printf("%s: ERROR: interface %d:%u port %d has no MAC %d/%d\n",
					    __func__, node, interface, port, i,
					    __cvmx_pko3_num_macs());
				continue;
			}

			if (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 2;
				cvmx_pko3_mac_table[i].spd = 10;
				cvmx_pko3_mac_table[i].fifo_cnt = 2;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XLAUI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 4;
				cvmx_pko3_mac_table[i].spd = 40;
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 3;
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				/* DXAUI at 20G, or XAUI at 10G */
				cvmx_pko3_mac_table[i].spd = 20;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_XFI) {
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].pri = 3;
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				cvmx_pko3_mac_table[i].spd = 10;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
				cvmx_pko3_mac_table[i].fifo_cnt = 1;
				cvmx_pko3_mac_table[i].pri = 1;
				cvmx_pko3_mac_table[i].spd = 1;
				cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_ILK ||
				   mode == CVMX_HELPER_INTERFACE_MODE_SRIO) {
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				cvmx_pko3_mac_table[i].pri = 3;
				/* ILK/SRIO: speed depends on lane count */
				cvmx_pko3_mac_table[i].spd = 40;
				cvmx_pko3_mac_table[i].mac_fifo_cnt = 4;
			} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
				cvmx_pko3_mac_table[i].fifo_cnt = 4;
				cvmx_pko3_mac_table[i].pri = 2;
				/* Actual speed depends on PCIe lanes/mode */
				cvmx_pko3_mac_table[i].spd = 50;
				/* SLI Tx FIFO size to be revisited */
				cvmx_pko3_mac_table[i].mac_fifo_cnt = 1;
			} else {
				/* Other BGX interface modes: SGMII/RGMII */
				unsigned int bgx_fifo_size =
					__cvmx_helper_bgx_fifo_size(xiface,
								    port);

				cvmx_pko3_mac_table[i].mac_fifo_cnt =
					bgx_fifo_size /
					(CVMX_BGX_TX_FIFO_SIZE / 4);
				cvmx_pko3_mac_table[i].fifo_cnt = 1;
				cvmx_pko3_mac_table[i].pri = 1;
				cvmx_pko3_mac_table[i].spd = 1;
			}

			if (debug)
				debug("%s: intf %d:%u port %u %s mac %02u cnt %u macfifo %uk spd %u\n",
				      __func__, node, interface, port,
				      cvmx_helper_interface_mode_to_string(mode),
				      i, cvmx_pko3_mac_table[i].fifo_cnt,
				      cvmx_pko3_mac_table[i].mac_fifo_cnt * 8,
				      cvmx_pko3_mac_table[i].spd);
		} /* for port */
	} /* for interface */

	/* Count the number of requested FIFOs */
	for (fifo_count = mac_num = 0; mac_num < __cvmx_pko3_num_macs();
	     mac_num++)
		fifo_count += cvmx_pko3_mac_table[mac_num].fifo_cnt;

	if (debug)
		debug("%s: initially requested FIFO count %u\n", __func__,
		      fifo_count);

	/* Heuristically trim the FIFO count to fit the available number */
	pri = 1;
	cnt = 4;
	while (fifo_count > max_fifos) {
		for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
			if (cvmx_pko3_mac_table[mac_num].fifo_cnt == cnt &&
			    cvmx_pko3_mac_table[mac_num].pri <= pri) {
				cvmx_pko3_mac_table[mac_num].fifo_cnt >>= 1;
				fifo_count -=
					cvmx_pko3_mac_table[mac_num].fifo_cnt;
			}
			if (fifo_count <= max_fifos)
				break;
		}
		if (pri >= 4) {
			pri = 1;
			cnt >>= 1;
		} else {
			pri++;
		}
		if (cnt == 0)
			break;
	}

	if (debug)
		debug("%s: adjusted FIFO count %u\n", __func__, fifo_count);

	/*
	 * Special case for the NULL virtual FIFO:
	 * there is no MAC connected to the NULL FIFO.
	 */
	fifo_group_cfg[fifo_groups - 1] = 0;

	/* Configure MAC units, and attach a FIFO to each */
	for (fifo = 0, cnt = 4; cnt > 0; cnt >>= 1) {
		unsigned int g;

		for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
			if (cvmx_pko3_mac_table[mac_num].fifo_cnt < cnt ||
			    cvmx_pko3_mac_table[mac_num].fifo_id != 0x1f)
				continue;

			/* Attach the FIFO to the MAC */
			cvmx_pko3_mac_table[mac_num].fifo_id = fifo;
			g = fifo >> 2;
			/* Sum speed for the FIFO group */
			fifo_group_spd[g] += cvmx_pko3_mac_table[mac_num].spd;

			if (cnt == 4)
				fifo_group_cfg[g] = 4;	/* 10k,0,0,0 */
			else if (cnt == 2 && (fifo & 0x3) == 0)
				fifo_group_cfg[g] = 3;	/* 5k,0,5k,0 */
			else if (cnt == 2 && fifo_group_cfg[g] == 3)
				/* no change */;
			else if (cnt == 1 && (fifo & 0x2) &&
				 fifo_group_cfg[g] == 3)
				fifo_group_cfg[g] = 1;	/* 5k,0,2.5k,2.5k */
			else if (cnt == 1 && (fifo & 0x3) == 0x3)
				/* no change */;
			else if (cnt == 1)
				fifo_group_cfg[g] = 0;	/* 2.5k x 4 */
			else
				cvmx_printf("ERROR: %s: internal error\n",
					    __func__);

			fifo += cnt;
		}
	}

	/* Check that there was no error in FIFO allocation */
	if (fifo > max_fifos) {
		cvmx_printf("ERROR: %s: Internal error FIFO %u\n", __func__,
			    fifo);
		return -1;
	}

	if (debug)
		debug("%s: used %u FIFOs\n", __func__, fifo);

	/* Now configure all FIFO groups */
	for (fifo = 0; fifo < fifo_groups; fifo++) {
		cvmx_pko_ptgfx_cfg_t pko_ptgfx_cfg;

		pko_ptgfx_cfg.u64 = csr_rd_node(node, CVMX_PKO_PTGFX_CFG(fifo));
		if (pko_ptgfx_cfg.s.size != fifo_group_cfg[fifo])
			pko_ptgfx_cfg.s.reset = 1;
		pko_ptgfx_cfg.s.size = fifo_group_cfg[fifo];

		if (fifo_group_spd[fifo] >= 40) {
			if (pko_ptgfx_cfg.s.size >= 3)
				pko_ptgfx_cfg.s.rate = 3;	/* 50 Gbps */
			else
				pko_ptgfx_cfg.s.rate = 2;	/* 25 Gbps */
		} else if (fifo_group_spd[fifo] >= 20) {
			pko_ptgfx_cfg.s.rate = 2;	/* 25 Gbps */
		} else if (fifo_group_spd[fifo] >= 10) {
			pko_ptgfx_cfg.s.rate = 1;	/* 12.5 Gbps */
		} else {
			pko_ptgfx_cfg.s.rate = 0;	/* 6.25 Gbps */
		}

		if (debug)
			debug("%s: FIFO %#x-%#x size=%u speed=%d rate=%d\n",
			      __func__, fifo * 4, fifo * 4 + 3,
			      pko_ptgfx_cfg.s.size, fifo_group_spd[fifo],
			      pko_ptgfx_cfg.s.rate);

		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
		pko_ptgfx_cfg.s.reset = 0;
		csr_wr_node(node, CVMX_PKO_PTGFX_CFG(fifo), pko_ptgfx_cfg.u64);
	}

	/* Configure each MAC with its assigned FIFO number */
	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
		cvmx_pko_macx_cfg_t pko_mac_cfg;

		if (debug)
			debug("%s: mac#%02u: fifo=%#x cnt=%u speed=%d\n",
			      __func__, mac_num,
			      cvmx_pko3_mac_table[mac_num].fifo_id,
			      cvmx_pko3_mac_table[mac_num].fifo_cnt,
			      cvmx_pko3_mac_table[mac_num].spd);

		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
		pko_mac_cfg.s.fifo_num = cvmx_pko3_mac_table[mac_num].fifo_id;
		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);
	}

	/* Set up PKO MCI0/MCI1/SKID credits */
	for (mac_num = 0; mac_num < __cvmx_pko3_num_macs(); mac_num++) {
		cvmx_pko_mci0_max_credx_t pko_mci0_max_cred;
		cvmx_pko_mci1_max_credx_t pko_mci1_max_cred;
		cvmx_pko_macx_cfg_t pko_mac_cfg;
		unsigned int fifo_credit, mac_credit, skid_credit;
		unsigned int pko_fifo_cnt, fifo_size;
		unsigned int mac_fifo_cnt;
		unsigned int tmp;
		int saved_fifo_num;

		pko_fifo_cnt = cvmx_pko3_mac_table[mac_num].fifo_cnt;
		mac_fifo_cnt = cvmx_pko3_mac_table[mac_num].mac_fifo_cnt;

		/* Skip unused MACs */
		if (pko_fifo_cnt == 0)
			continue;

		/* Sanity check */
		if (pko_fifo_cnt > 4)
			pko_fifo_cnt = 1;

		fifo_size = (2 * 1024) + (1024 / 2);	/* 2.5 KiB */
		fifo_credit = pko_fifo_cnt * fifo_size;

		if (mac_num == 0) {
			/* Loopback */
			mac_credit = 4096;	/* From HRM Sec 13.0 */
			skid_credit = 0;
		} else if (mac_num == 1) {
			/* DPI */
			mac_credit = 2 * 1024;
			skid_credit = 0;
		} else if (octeon_has_feature(OCTEON_FEATURE_ILK) &&
			   (mac_num & 0xfe) == 2) {
			/* ILK0, ILK1: MAC 2,3 */
			mac_credit = 4 * 1024;	/* 4KB FIFO */
			skid_credit = 0;
		} else if (octeon_has_feature(OCTEON_FEATURE_SRIO) &&
			   (mac_num >= 6) && (mac_num <= 9)) {
			/* SRIO0, SRIO1: MAC 6..9 */
			mac_credit = 1024 / 2;
			skid_credit = 0;
		} else {
			/* BGX */
			mac_credit = mac_fifo_cnt * 8 * 1024;
			skid_credit = mac_fifo_cnt * 256;
		}

		if (debug)
			debug("%s: mac %u pko_fifo_credit=%u mac_credit=%u\n",
			      __func__, mac_num, fifo_credit, mac_credit);

		tmp = (fifo_credit + mac_credit) / 16;
		pko_mci0_max_cred.u64 = 0;
		pko_mci0_max_cred.s.max_cred_lim = tmp;

		/* Check for overflow */
		if (pko_mci0_max_cred.s.max_cred_lim != tmp) {
			cvmx_printf("WARNING: %s: MCI0 credit overflow\n",
				    __func__);
			pko_mci0_max_cred.s.max_cred_lim = 0xfff;
		}

		/* Pass 2 PKO hardware does not use the MCI0 credits */
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			csr_wr_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num),
				    pko_mci0_max_cred.u64);

		/* The original CSR formula is the correct one after all */
		tmp = mac_credit / 16;
		pko_mci1_max_cred.u64 = 0;
		pko_mci1_max_cred.s.max_cred_lim = tmp;

		/* Check for overflow */
		if (pko_mci1_max_cred.s.max_cred_lim != tmp) {
			cvmx_printf("WARNING: %s: MCI1 credit overflow\n",
				    __func__);
			pko_mci1_max_cred.s.max_cred_lim = 0xfff;
		}

		csr_wr_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num),
			    pko_mci1_max_cred.u64);

		tmp = (skid_credit / 256) >> 1;	/* valid 0,1,2 */
		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));

		/*
		 * The PKO_MACX_CFG bits cannot be changed unless
		 * FIFO_NUM = 0x1f (unused FIFO).
		 */
		saved_fifo_num = pko_mac_cfg.s.fifo_num;
		pko_mac_cfg.s.fifo_num = 0x1f;
		pko_mac_cfg.s.skid_max_cnt = tmp;
		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

		pko_mac_cfg.u64 = csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
		pko_mac_cfg.s.fifo_num = saved_fifo_num;
		csr_wr_node(node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

		if (debug) {
			pko_mci0_max_cred.u64 =
				csr_rd_node(node, CVMX_PKO_MCI0_MAX_CREDX(mac_num));
			pko_mci1_max_cred.u64 =
				csr_rd_node(node, CVMX_PKO_MCI1_MAX_CREDX(mac_num));
			pko_mac_cfg.u64 =
				csr_rd_node(node, CVMX_PKO_MACX_CFG(mac_num));
			debug("%s: mac %u PKO_MCI0_MAX_CREDX=%u PKO_MCI1_MAX_CREDX=%u PKO_MACX_CFG[SKID_MAX_CNT]=%u\n",
			      __func__, mac_num,
			      pko_mci0_max_cred.s.max_cred_lim,
			      pko_mci1_max_cred.s.max_cred_lim,
			      pko_mac_cfg.s.skid_max_cnt);
		}
	} /* for mac_num */

	return 0;
}

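/*
 * Worked example of the credit arithmetic in cvmx_pko_setup_macs() above
 * (illustrative): a BGX MAC with mac_fifo_cnt = 4 gets
 * mac_credit = 4 * 8 * 1024 = 32768 bytes, so
 * PKO_MCI1_MAX_CREDX[MAX_CRED_LIM] = 32768 / 16 = 2048, well under the
 * 0xfff cap applied on overflow; skid_credit = 4 * 256 = 1024, giving
 * SKID_MAX_CNT = (1024 / 256) >> 1 = 2, within the valid range 0..2.
 */
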
/** Set MAC options
 *
 * The options supported are the parameters below:
 *
 * @param xiface The physical interface number
 * @param index The physical sub-interface port
 * @param fcs_enable Enable FCS generation
 * @param pad_enable Enable padding to the minimum packet size
 * @param fcs_sop_off Number of bytes at the start of the packet to exclude
 *	from the FCS
 *
 * The typical use for `fcs_sop_off` is when the interface is configured
 * to use a header, such as HiGig, preceding every Ethernet packet.
 * Such a header usually does not take part in the CRC32 computation
 * stream, and its size must be set with this parameter.
 *
 * @return 0 on success, -1 if the interface/port is invalid.
 */
int cvmx_pko3_interface_options(int xiface, int index, bool fcs_enable,
				bool pad_enable, unsigned int fcs_sop_off)
{
	int mac_num;
	cvmx_pko_macx_cfg_t pko_mac_cfg;
	unsigned int fifo_num;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (debug)
		debug("%s: intf %u:%u/%u fcs=%d pad=%d\n", __func__, xi.node,
		      xi.interface, index, fcs_enable, pad_enable);

	mac_num = __cvmx_pko3_get_mac_num(xiface, index);
	if (mac_num < 0) {
		cvmx_printf("ERROR: %s: invalid interface %u:%u/%u\n", __func__,
			    xi.node, xi.interface, index);
		return -1;
	}

	pko_mac_cfg.u64 = csr_rd_node(xi.node, CVMX_PKO_MACX_CFG(mac_num));

	/* If the MAC is not assigned, return an error */
	if (pko_mac_cfg.s.fifo_num == 0x1f) {
		cvmx_printf("ERROR: %s: unused interface %u:%u/%u\n", __func__,
			    xi.node, xi.interface, index);
		return -1;
	}

	if (pko_mac_cfg.s.min_pad_ena == pad_enable &&
	    pko_mac_cfg.s.fcs_ena == fcs_enable) {
		if (debug)
			debug("%s: mac %#x unchanged\n", __func__, mac_num);
		return 0;
	}

	/* WORKAROUND: Pass 1 won't allow changing any bits unless FIFO_NUM=0x1f */
	fifo_num = pko_mac_cfg.s.fifo_num;
	pko_mac_cfg.s.fifo_num = 0x1f;

	pko_mac_cfg.s.min_pad_ena = pad_enable;
	pko_mac_cfg.s.fcs_ena = fcs_enable;
	pko_mac_cfg.s.fcs_sop_off = fcs_sop_off;

	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

	pko_mac_cfg.s.fifo_num = fifo_num;
	csr_wr_node(xi.node, CVMX_PKO_MACX_CFG(mac_num), pko_mac_cfg.u64);

	if (debug)
		debug("%s: PKO_MAC[%u]CFG=%#llx\n", __func__, mac_num,
		      (unsigned long long)csr_rd_node(xi.node,
						      CVMX_PKO_MACX_CFG(mac_num)));

	return 0;
}

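/*
 * Example (illustrative sketch): enabling FCS generation and minimum-size
 * padding, with no FCS start-of-packet offset, on the first port of the
 * first interface of node 0. The xiface value comes from
 * cvmx_helper_node_interface_to_xiface(), as used elsewhere in this file:
 *
 *	int xiface = cvmx_helper_node_interface_to_xiface(0, 0);
 *
 *	if (cvmx_pko3_interface_options(xiface, 0, true, true, 0) != 0)
 *		printf("PKO MAC options not applied\n");
 */
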
/** Set descriptor queue options
 *
 * The `min_pad` parameter must be in agreement with the interface-level
 * padding option for all descriptor queues assigned to that particular
 * interface/port.
 *
 * @param node node on which to operate
 * @param dq descriptor queue to set
 * @param min_pad minimum padding to set for the dq
 */
void cvmx_pko3_dq_options(unsigned int node, unsigned int dq, bool min_pad)
{
	cvmx_pko_pdm_dqx_minpad_t reg;

	dq &= (1 << 10) - 1;
	reg.u64 = csr_rd_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq));
	reg.s.minpad = min_pad;
	csr_wr_node(node, CVMX_PKO_PDM_DQX_MINPAD(dq), reg.u64);
}
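
/*
 * Example (illustrative sketch): per the note above, a DQ feeding a MAC
 * that was configured with pad_enable = true in
 * cvmx_pko3_interface_options() should have its own minimum-pad flag set
 * to match; 'dq' here is a hypothetical queue number on node 0:
 *
 *	cvmx_pko3_dq_options(0, dq, true);
 */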