// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Helper Functions for the Configuration Framework
 */

#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pki-defs.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pip.h>

DECLARE_GLOBAL_DATA_PTR;

int cvmx_npi_max_pknds;
static bool port_cfg_data_initialized;

struct cvmx_cfg_port_param cvmx_cfg_port[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE]
					[CVMX_HELPER_CFG_MAX_PORT_PER_IFACE];
/*
 * Indexed by the pko_port number
 */
static int __cvmx_cfg_pko_highest_queue;
struct cvmx_cfg_pko_port_param
cvmx_pko_queue_table[CVMX_HELPER_CFG_MAX_PKO_PORT] = {
	[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] = {
		CVMX_HELPER_CFG_INVALID_VALUE,
		CVMX_HELPER_CFG_INVALID_VALUE
	}
};

cvmx_user_static_pko_queue_config_t
__cvmx_pko_queue_static_config[CVMX_MAX_NODES];

struct cvmx_cfg_pko_port_map
cvmx_cfg_pko_port_map[CVMX_HELPER_CFG_MAX_PKO_PORT] = {
	[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] = {
		CVMX_HELPER_CFG_INVALID_VALUE,
		CVMX_HELPER_CFG_INVALID_VALUE,
		CVMX_HELPER_CFG_INVALID_VALUE
	}
};

/*
 * This array assists translation from ipd_port to pko_port.
 * The "16" is the rounded value for the 3rd 4-bit value of
 * ipd_port, used to differentiate "interfaces".
 */
static struct cvmx_cfg_pko_port_pair
ipd2pko_port_cache[16][CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] = {
	[0 ... 15] = {
		[0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE - 1] = {
			CVMX_HELPER_CFG_INVALID_VALUE,
			CVMX_HELPER_CFG_INVALID_VALUE
		}
	}
};

/*
 * Options
 *
 * Each array-elem's initial value is also the option's default value.
 */
static u64 cvmx_cfg_opts[CVMX_HELPER_CFG_OPT_MAX] = {
	[0 ... CVMX_HELPER_CFG_OPT_MAX - 1] = 1
};

/*
 * MISC
 */

static int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines allocated */
static int cvmx_pko_queue_alloc(u64 port, int count);
static void cvmx_init_port_cfg(void);
static const int dbg;

int __cvmx_helper_cfg_pknd(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int pkind;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/*
	 * Only 8 PKNDs are assigned to ILK channels. The channels are wrapped
	 * if more than 8 channels are configured, so fix the index
	 * accordingly.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		if (cvmx_helper_interface_get_mode(xiface) ==
		    CVMX_HELPER_INTERFACE_MODE_ILK)
			index %= 8;
	}

	pkind = cvmx_cfg_port[xi.node][xi.interface][index].ccpp_pknd;
	return pkind;
}

int __cvmx_helper_cfg_bpid(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/*
	 * Only 8 BPIDs are assigned to ILK channels. The channels are wrapped
	 * if more than 8 channels are configured, so fix the index
	 * accordingly.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		if (cvmx_helper_interface_get_mode(xiface) ==
		    CVMX_HELPER_INTERFACE_MODE_ILK)
			index %= 8;
	}

	return cvmx_cfg_port[xi.node][xi.interface][index].ccpp_bpid;
}

int __cvmx_helper_cfg_pko_port_base(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].ccpp_pko_port_base;
}

int __cvmx_helper_cfg_pko_port_num(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	return cvmx_cfg_port[xi.node][xi.interface][index].ccpp_pko_num_ports;
}

int __cvmx_helper_cfg_pko_queue_num(int pko_port)
{
	return cvmx_pko_queue_table[pko_port].ccppp_num_queues;
}

int __cvmx_helper_cfg_pko_queue_base(int pko_port)
{
	return cvmx_pko_queue_table[pko_port].ccppp_queue_base;
}

int __cvmx_helper_cfg_pko_max_queue(void)
{
	return __cvmx_cfg_pko_highest_queue;
}

int __cvmx_helper_cfg_pko_max_engine(void)
{
	return cvmx_cfg_max_pko_engines;
}

int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
{
	if (opt >= CVMX_HELPER_CFG_OPT_MAX)
		return -1;

	cvmx_cfg_opts[opt] = val;

	return 0;
}

uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
{
	if (opt >= CVMX_HELPER_CFG_OPT_MAX)
		return (uint64_t)CVMX_HELPER_CFG_INVALID_VALUE;

	return cvmx_cfg_opts[opt];
}
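
/*
 * Usage sketch (illustrative only; the exact option names come from
 * cvmx_helper_cfg_option_t in cvmx-helper-cfg.h): every option defaults
 * to 1, so only deviations from the default need to be set, e.g.:
 *
 *	cvmx_helper_cfg_opt_set(CVMX_HELPER_CFG_OPT_USE_DWB, 0);
 *	if (cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB) == 0)
 *		; // DWB (Don't Write Back) disabled
 */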

/*
 * Initialize the queue allocation list. The existing static allocation result
 * is used as a starting point to ensure backward compatibility.
 *
 * Return: 0 on success
 *         -1 on failure
 */
int cvmx_pko_queue_grp_alloc(u64 start, uint64_t end, uint64_t count)
{
	u64 port;
	int ret_val;

	for (port = start; port < end; port++) {
		ret_val = cvmx_pko_queue_alloc(port, count);
		if (ret_val == -1) {
			printf("ERROR: %s: Failed to allocate queue for port=%d count=%d\n",
			       __func__, (int)port, (int)count);
			return ret_val;
		}
	}
	return 0;
}
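
/*
 * Example (a minimal sketch): give each of the first 16 PKO ports one
 * queue apiece; equivalent to calling cvmx_pko_queue_alloc() for ports
 * 0..15 with count = 1:
 *
 *	if (cvmx_pko_queue_grp_alloc(0, 16, 1) != 0)
 *		printf("PKO queue group allocation failed\n");
 */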

int cvmx_pko_queue_init_from_cvmx_config_non_pknd(void)
{
	int ret_val = -1;
	u64 count, start, end;

	start = 0;
	end = __cvmx_pko_queue_static_config[0].non_pknd.pko_ports_per_interface[0];
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_interface[0];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = 16;
	end = start + __cvmx_pko_queue_static_config[0].non_pknd.pko_ports_per_interface[1];
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_interface[1];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		/* Interface 4: AGL, PKO port 24 only, DPI 32-35 */
		start = 24;
		end = start + 1;
		count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_interface[4];
		ret_val = cvmx_pko_queue_grp_alloc(start, end, count);

		if (ret_val != 0)
			return -1;
		end = 32; /* DPI first PKO port */
	}

	start = end;
	end = 36;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_pci;
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 40;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_loop;
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 42;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[0];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 44;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[1];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 46;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[2];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;

	start = end;
	end = 48;
	count = __cvmx_pko_queue_static_config[0].non_pknd.pko_queues_per_port_srio[3];
	ret_val = cvmx_pko_queue_grp_alloc(start, end, count);
	if (ret_val != 0)
		return -1;
	return 0;
}

int cvmx_helper_pko_queue_config_get(int node, cvmx_user_static_pko_queue_config_t *cfg)
{
	*cfg = __cvmx_pko_queue_static_config[node];
	return 0;
}

int cvmx_helper_pko_queue_config_set(int node, cvmx_user_static_pko_queue_config_t *cfg)
{
	__cvmx_pko_queue_static_config[node] = *cfg;
	return 0;
}

static int queue_range_init;

int init_cvmx_pko_que_range(void)
{
	int rv = 0;

	if (queue_range_init)
		return 0;
	queue_range_init = 1;
	rv = cvmx_create_global_resource_range(CVMX_GR_TAG_PKO_QUEUES,
					       CVMX_HELPER_CFG_MAX_PKO_QUEUES);
	if (rv != 0)
		printf("ERROR: %s: Failed to initialize pko queues range\n", __func__);

	return rv;
}

/*
 * Get a block of "count" queues for "port"
 *
 * @param port	the port for which the queues are requested
 * @param count	the number of queues requested
 *
 * Return: 0 on success
 *         -1 on failure
 */
static int cvmx_pko_queue_alloc(u64 port, int count)
{
	int ret_val = -1;
	int highest_queue;

	init_cvmx_pko_que_range();

	if (cvmx_pko_queue_table[port].ccppp_num_queues == count)
		return cvmx_pko_queue_table[port].ccppp_queue_base;

	if (cvmx_pko_queue_table[port].ccppp_num_queues > 0) {
		printf("WARNING: %s port=%d already has %d queues\n",
		       __func__, (int)port,
		       (int)cvmx_pko_queue_table[port].ccppp_num_queues);
		return -1;
	}

	if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
		printf("ERROR: %s port=%d > %d\n", __func__, (int)port,
		       CVMX_HELPER_CFG_MAX_PKO_QUEUES);
		return -1;
	}

	ret_val = cvmx_allocate_global_resource_range(CVMX_GR_TAG_PKO_QUEUES,
						      port, count, 1);

	debug("%s: pko_e_port=%i q_base=%i q_count=%i\n",
	      __func__, (int)port, ret_val, (int)count);

	if (ret_val == -1)
		return ret_val;
	cvmx_pko_queue_table[port].ccppp_queue_base = ret_val;
	cvmx_pko_queue_table[port].ccppp_num_queues = count;

	highest_queue = ret_val + count - 1;
	if (highest_queue > __cvmx_cfg_pko_highest_queue)
		__cvmx_cfg_pko_highest_queue = highest_queue;
	return 0;
}
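
/*
 * Example (illustrative): request four queues for PKO port 3. On success
 * the range is recorded in cvmx_pko_queue_table[3] and can be read back
 * through the accessors defined above:
 *
 *	if (cvmx_pko_queue_alloc(3, 4) == 0) {
 *		int base = __cvmx_helper_cfg_pko_queue_base(3);
 *		int num = __cvmx_helper_cfg_pko_queue_num(3);	// == 4
 *	}
 */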

/*
 * Return the queues allocated to "port" back to the free pool
 *
 * @param port	the port for which the queues are returned
 *
 * Return: 0 on success
 *         -1 on failure
 */
int cvmx_pko_queue_free(uint64_t port)
{
	int ret_val = -1;

	init_cvmx_pko_que_range();
	if (port >= CVMX_HELPER_CFG_MAX_PKO_QUEUES) {
		debug("ERROR: %s port=%d > %d\n", __func__, (int)port,
		      CVMX_HELPER_CFG_MAX_PKO_QUEUES);
		return -1;
	}

	ret_val = cvmx_free_global_resource_range_with_base(
		CVMX_GR_TAG_PKO_QUEUES, cvmx_pko_queue_table[port].ccppp_queue_base,
		cvmx_pko_queue_table[port].ccppp_num_queues);
	if (ret_val != 0)
		return ret_val;

	cvmx_pko_queue_table[port].ccppp_num_queues = 0;
	cvmx_pko_queue_table[port].ccppp_queue_base = CVMX_HELPER_CFG_INVALID_VALUE;
	ret_val = 0;
	return ret_val;
}

void cvmx_pko_queue_free_all(void)
{
	int i;

	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
		if (cvmx_pko_queue_table[i].ccppp_queue_base !=
		    CVMX_HELPER_CFG_INVALID_VALUE)
			cvmx_pko_queue_free(i);
}

void cvmx_pko_queue_show(void)
{
	int i;

	cvmx_show_global_resource_range(CVMX_GR_TAG_PKO_QUEUES);
	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
		if (cvmx_pko_queue_table[i].ccppp_queue_base !=
		    CVMX_HELPER_CFG_INVALID_VALUE)
			debug("port=%d que_base=%d que_num=%d\n", i,
			      (int)cvmx_pko_queue_table[i].ccppp_queue_base,
			      (int)cvmx_pko_queue_table[i].ccppp_num_queues);
}

void cvmx_helper_cfg_show_cfg(void)
{
	int i, j;

	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		debug("%s: interface%d mode %10s nports%4d\n", __func__, i,
		      cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
		      cvmx_helper_interface_enumerate(i));

		for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
			debug("\tpknd[%i][%d]%d", i, j,
			      __cvmx_helper_cfg_pknd(i, j));
			debug(" pko_port_base[%i][%d]%d", i, j,
			      __cvmx_helper_cfg_pko_port_base(i, j));
			debug(" pko_port_num[%i][%d]%d\n", i, j,
			      __cvmx_helper_cfg_pko_port_num(i, j));
		}
	}

	for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++) {
		if (__cvmx_helper_cfg_pko_queue_base(i) !=
		    CVMX_HELPER_CFG_INVALID_VALUE) {
			debug("%s: pko_port%d qbase%d nqueues%d interface%d index%d\n",
			      __func__, i, __cvmx_helper_cfg_pko_queue_base(i),
			      __cvmx_helper_cfg_pko_queue_num(i),
			      __cvmx_helper_cfg_pko_port_interface(i),
			      __cvmx_helper_cfg_pko_port_index(i));
		}
	}
}

/*
 * Initialize cvmx_cfg_pko_port_map
 */
void cvmx_helper_cfg_init_pko_port_map(void)
{
	int i, j, k;
	int pko_eid;
	int pko_port_base, pko_port_max;
	cvmx_helper_interface_mode_t mode;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	/*
	 * One pko_eid is allocated to each port except for ILK, NPI, and
	 * LOOP. Each of the three has one eid.
	 */
	pko_eid = 0;
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		mode = cvmx_helper_interface_get_mode(i);
		for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
			pko_port_base = cvmx_cfg_port[0][i][j].ccpp_pko_port_base;
			pko_port_max = pko_port_base + cvmx_cfg_port[0][i][j].ccpp_pko_num_ports;
			if (!octeon_has_feature(OCTEON_FEATURE_PKO3)) {
				cvmx_helper_cfg_assert(pko_port_base !=
						       CVMX_HELPER_CFG_INVALID_VALUE);
				cvmx_helper_cfg_assert(pko_port_max >= pko_port_base);
			}
			for (k = pko_port_base; k < pko_port_max; k++) {
				cvmx_cfg_pko_port_map[k].ccppl_interface = i;
				cvmx_cfg_pko_port_map[k].ccppl_index = j;
				cvmx_cfg_pko_port_map[k].ccppl_eid = pko_eid;
			}

			if (!(mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
			      mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
			      mode == CVMX_HELPER_INTERFACE_MODE_ILK))
				pko_eid++;
		}

		if (mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
		    mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
		    mode == CVMX_HELPER_INTERFACE_MODE_ILK)
			pko_eid++;
	}

	/*
	 * Legal pko_eids [0, 0x13] should not be exhausted.
	 */
	if (!octeon_has_feature(OCTEON_FEATURE_PKO3))
		cvmx_helper_cfg_assert(pko_eid <= 0x14);

	cvmx_cfg_max_pko_engines = pko_eid;
}
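
/*
 * Worked example of the eid assignment above, with assumed port counts
 * (for illustration only): if interface 0 is SGMII with 4 ports and
 * interface 1 is NPI, ports 0-3 of interface 0 receive eids 0-3, all NPI
 * ports share eid 4, and cvmx_cfg_max_pko_engines ends up as 5.
 */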

void cvmx_helper_cfg_set_jabber_and_frame_max(void)
{
	int interface, port;
	/* Set the frame max size and jabber size to 65535. */
	const unsigned int max_frame = 65535;

	// FIXME: should support node argument for remote node init
	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
		int ipd_port;
		int node = cvmx_get_node_num();

		for (interface = 0;
		     interface < cvmx_helper_get_number_of_interfaces();
		     interface++) {
			int xiface = cvmx_helper_node_interface_to_xiface(node, interface);
			cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
			int num_ports = cvmx_helper_ports_on_interface(xiface);

			// FIXME: should be an easier way to determine
			// that an interface is Ethernet/BGX
			switch (imode) {
			case CVMX_HELPER_INTERFACE_MODE_SGMII:
			case CVMX_HELPER_INTERFACE_MODE_XAUI:
			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
			case CVMX_HELPER_INTERFACE_MODE_XLAUI:
			case CVMX_HELPER_INTERFACE_MODE_XFI:
			case CVMX_HELPER_INTERFACE_MODE_10G_KR:
			case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
				for (port = 0; port < num_ports; port++) {
					ipd_port = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_pki_set_max_frm_len(ipd_port, max_frame);
					cvmx_helper_bgx_set_jabber(xiface, port, max_frame);
				}
				break;
			default:
				break;
			}
		}
	} else {
		/*
		 * Set the frame max size and jabber size to 65535, as the
		 * defaults are too small.
		 */
		for (interface = 0; interface < cvmx_helper_get_number_of_interfaces();
		     interface++) {
			int xiface = cvmx_helper_node_interface_to_xiface(cvmx_get_node_num(),
									  interface);
			cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(xiface);
			int num_ports = cvmx_helper_ports_on_interface(xiface);

			switch (imode) {
			case CVMX_HELPER_INTERFACE_MODE_SGMII:
			case CVMX_HELPER_INTERFACE_MODE_QSGMII:
			case CVMX_HELPER_INTERFACE_MODE_XAUI:
			case CVMX_HELPER_INTERFACE_MODE_RXAUI:
				for (port = 0; port < num_ports; port++)
					csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				for (port = 0; port < num_ports; port++) {
					csr_wr(CVMX_GMXX_RXX_FRM_MAX(port, interface), 65535);
					csr_wr(CVMX_GMXX_RXX_JABBER(port, interface), 65535);
				}
				break;
			case CVMX_HELPER_INTERFACE_MODE_ILK:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				for (port = 0; port < num_ports; port++) {
					int ipd_port = cvmx_helper_get_ipd_port(interface, port);

					cvmx_ilk_enable_la_header(ipd_port, 0);
				}
				break;
			case CVMX_HELPER_INTERFACE_MODE_SRIO:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				break;
			case CVMX_HELPER_INTERFACE_MODE_AGL:
				/* Set max and min value for frame check */
				cvmx_pip_set_frame_check(interface, -1);
				csr_wr(CVMX_AGL_GMX_RXX_FRM_MAX(0), 65535);
				csr_wr(CVMX_AGL_GMX_RXX_JABBER(0), 65535);
				break;
			default:
				break;
			}
		}
	}
}

/**
 * Enable storing short packets only in the WQE
 * unless NO_WPTR is set, which already has the same effect
 */
void cvmx_helper_cfg_store_short_packets_in_wqe(void)
{
	int interface, port;
	cvmx_ipd_ctl_status_t ipd_ctl_status;
	unsigned int dyn_rs = 1;

	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		return;

	/* NO_WPTR combines WQE with 1st MBUF, RS is redundant */
	ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
	if (ipd_ctl_status.s.no_wptr) {
		dyn_rs = 0;
		/* Note: consider also setting 'ignrs' when NO_WPTR is set */
	}

	for (interface = 0; interface < cvmx_helper_get_number_of_interfaces(); interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_port_cfg_t port_cfg;
			int pknd = port;

			if (octeon_has_feature(OCTEON_FEATURE_PKND))
				pknd = cvmx_helper_get_pknd(interface, port);
			else
				pknd = cvmx_helper_get_ipd_port(interface, port);
			port_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
			port_cfg.s.dyn_rs = dyn_rs;
			csr_wr(CVMX_PIP_PRT_CFGX(pknd), port_cfg.u64);
		}
	}
}

int __cvmx_helper_cfg_pko_port_interface(int pko_port)
{
	return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
}

int __cvmx_helper_cfg_pko_port_index(int pko_port)
{
	return cvmx_cfg_pko_port_map[pko_port].ccppl_index;
}

int __cvmx_helper_cfg_pko_port_eid(int pko_port)
{
	return cvmx_cfg_pko_port_map[pko_port].ccppl_eid;
}

#define IPD2PKO_CACHE_Y(ipd_port) ((ipd_port) >> 8)
#define IPD2PKO_CACHE_X(ipd_port) ((ipd_port) & 0xff)

static inline int __cvmx_helper_cfg_ipd2pko_cachex(int ipd_port)
{
	int ipd_x = IPD2PKO_CACHE_X(ipd_port);

	if (ipd_port & 0x800)
		ipd_x = (ipd_x >> 4) & 3;
	return ipd_x;
}
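
/*
 * Worked example of the cache indexing above (values for illustration):
 * ipd_port 0x120 -> ipd_y = 0x120 >> 8 = 1, ipd_x = 0x20 (bit 11 clear,
 * so the low byte is used as-is); ipd_port 0x840 (bit 11 set) ->
 * ipd_y = 8, ipd_x = ((0x40 >> 4) & 3) = 0, folding the 4-bit port
 * field into the range [0..3].
 */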

/*
 * ipd_port to pko_port translation cache
 */
int __cvmx_helper_cfg_init_ipd2pko_cache(void)
{
	int i, j, n;
	int ipd_y, ipd_x, ipd_port;

	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		n = cvmx_helper_interface_enumerate(i);

		for (j = 0; j < n; j++) {
			ipd_port = cvmx_helper_get_ipd_port(i, j);
			ipd_y = IPD2PKO_CACHE_Y(ipd_port);
			ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);
			ipd2pko_port_cache[ipd_y][ipd_x] = (struct cvmx_cfg_pko_port_pair){
				__cvmx_helper_cfg_pko_port_base(i, j),
				__cvmx_helper_cfg_pko_port_num(i, j)
			};
		}
	}

	return 0;
}

int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port)
{
	int ipd_y, ipd_x;

	/* Internal PKO ports are not present in PKO3 */
	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		return ipd_port;

	ipd_y = IPD2PKO_CACHE_Y(ipd_port);
	ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);

	return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_base_port;
}

int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
{
	int ipd_y, ipd_x;

	ipd_y = IPD2PKO_CACHE_Y(ipd_port);
	ipd_x = __cvmx_helper_cfg_ipd2pko_cachex(ipd_port);

	return ipd2pko_port_cache[ipd_y][ipd_x].ccppp_nports;
}

/**
 * Return the number of queues to be assigned to this pko_port
 *
 * @param pko_port
 * Return: the number of queues for this pko_port
 *
 */
static int cvmx_helper_cfg_dft_nqueues(int pko_port)
{
	cvmx_helper_interface_mode_t mode;
	int interface;
	int n;
	int ret;

	interface = __cvmx_helper_cfg_pko_port_interface(pko_port);
	mode = cvmx_helper_interface_get_mode(interface);

	n = NUM_ELEMENTS(__cvmx_pko_queue_static_config[0].pknd.pko_cfg_iface);

	if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) {
		ret = __cvmx_pko_queue_static_config[0].pknd.pko_cfg_loop.queues_per_port;
	} else if (mode == CVMX_HELPER_INTERFACE_MODE_NPI) {
		ret = __cvmx_pko_queue_static_config[0].pknd.pko_cfg_npi.queues_per_port;
	} else if (interface >= 0 && interface < n) {
		ret = __cvmx_pko_queue_static_config[0].pknd.pko_cfg_iface[interface].queues_per_port;
	} else {
		/* Should never be called */
		ret = 1;
	}
	/* Override for sanity in case of empty static config table */
	if (ret == 0)
		ret = 1;
	return ret;
}

static int cvmx_helper_cfg_init_pko_iports_and_queues_using_static_config(void)
{
	int pko_port_base = 0;
	int cvmx_cfg_default_pko_nports = 1;
	int i, j, n, k;
	int rv = 0;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	/* When not using a config file, each port is assigned one internal PKO port */
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		n = cvmx_helper_interface_enumerate(i);
		for (j = 0; j < n; j++) {
			cvmx_cfg_port[0][i][j].ccpp_pko_port_base = pko_port_base;
			cvmx_cfg_port[0][i][j].ccpp_pko_num_ports = cvmx_cfg_default_pko_nports;
			/*
			 * Initialize the interface early here so that
			 * cvmx_helper_cfg_dft_nqueues() below can get the
			 * interface number corresponding to the pko port
			 */
			for (k = pko_port_base; k < pko_port_base + cvmx_cfg_default_pko_nports;
			     k++) {
				cvmx_cfg_pko_port_map[k].ccppl_interface = i;
			}
			pko_port_base += cvmx_cfg_default_pko_nports;
		}
	}
	cvmx_helper_cfg_assert(pko_port_base <= CVMX_HELPER_CFG_MAX_PKO_PORT);

	/* Assign queues per pko port */
	for (i = 0; i < pko_port_base; i++) {
		int base;

		n = cvmx_helper_cfg_dft_nqueues(i);
		base = cvmx_pko_queue_alloc(i, n);
		if (base == -1) {
			printf("ERROR: %s: failed to alloc %d queues for pko port=%d\n", __func__,
			       n, i);
			rv = -1;
		}
	}
	return rv;
}

/**
 * Returns whether a port is valid for a given interface
 *
 * @param xiface interface to check
 * @param index port index in the interface
 *
 * Return: non-zero if the port is present, zero if not.
 */
int cvmx_helper_is_port_valid(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].valid;
}

void cvmx_helper_set_port_valid(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].valid = valid;
}

void cvmx_helper_set_mac_phy_mode(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].sgmii_phy_mode = valid;
}

bool cvmx_helper_get_mac_phy_mode(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].sgmii_phy_mode;
}

void cvmx_helper_set_1000x_mode(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].sgmii_1000x_mode = valid;
}

bool cvmx_helper_get_1000x_mode(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].sgmii_1000x_mode;
}

void cvmx_helper_set_agl_rx_clock_delay_bypass(int xiface, int index, bool valid)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_delay_bypass = valid;
}

bool cvmx_helper_get_agl_rx_clock_delay_bypass(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_delay_bypass;
}

void cvmx_helper_set_agl_rx_clock_skew(int xiface, int index, uint8_t value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_skew = value;
}

uint8_t cvmx_helper_get_agl_rx_clock_skew(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].agl_rx_clk_skew;
}

void cvmx_helper_set_agl_refclk_sel(int xiface, int index, uint8_t value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].agl_refclk_sel = value;
}

uint8_t cvmx_helper_get_agl_refclk_sel(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].agl_refclk_sel;
}

void cvmx_helper_set_port_force_link_up(int xiface, int index, bool value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].force_link_up = value;
}

bool cvmx_helper_get_port_force_link_up(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].force_link_up;
}

void cvmx_helper_set_port_phy_present(int xiface, int index, bool value)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].phy_present = value;
}

bool cvmx_helper_get_port_phy_present(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].phy_present;
}

int __cvmx_helper_init_port_valid(void)
{
	int i, j, node;
	bool valid;
	static void *fdt_addr;
	int rc;
	struct cvmx_coremask pcm;

	octeon_get_available_coremask(&pcm);

	if (!fdt_addr)
		fdt_addr = __cvmx_phys_addr_to_ptr((u64)gd->fdt_blob, 128 * 1024);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
		rc = __cvmx_helper_parse_bgx_dt(fdt_addr);
		if (!rc)
			rc = __cvmx_fdt_parse_vsc7224(fdt_addr);
		if (!rc)
			rc = __cvmx_fdt_parse_avsp5410(fdt_addr);
		if (!rc && octeon_has_feature(OCTEON_FEATURE_BGX_XCV))
			rc = __cvmx_helper_parse_bgx_rgmii_dt(fdt_addr);

		/* Some ports are not in sequence; the device tree does not
		 * clear them.
		 *
		 * Also clear any ports that are not defined in the device tree.
		 * Apply this to each node.
		 */
		for (node = 0; node < CVMX_MAX_NODES; node++) {
			if (!cvmx_coremask_get64_node(&pcm, node))
				continue;
			for (i = 0; i < CVMX_HELPER_MAX_GMX; i++) {
				int xiface = cvmx_helper_node_interface_to_xiface(node, i);

				for (j = 0; j < cvmx_helper_interface_enumerate(i); j++) {
					cvmx_bgxx_cmrx_config_t cmr_config;

					cmr_config.u64 =
						csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(j, i));
					if ((cmr_config.s.lane_to_sds == 0xe4 &&
					     cmr_config.s.lmac_type != 4 &&
					     cmr_config.s.lmac_type != 1 &&
					     cmr_config.s.lmac_type != 5) ||
					    ((cvmx_helper_get_port_fdt_node_offset(xiface, j) ==
					      CVMX_HELPER_CFG_INVALID_VALUE)))
						cvmx_helper_set_port_valid(xiface, j, false);
				}
			}
		}
		return rc;
	}

	/* TODO: Update this to behave more like 78XX */
	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
		int n = cvmx_helper_interface_enumerate(i);

		for (j = 0; j < n; j++) {
			int ipd_port = cvmx_helper_get_ipd_port(i, j);

			valid = (__cvmx_helper_board_get_port_from_dt(fdt_addr, ipd_port) == 1);
			cvmx_helper_set_port_valid(i, j, valid);
		}
	}
	return 0;
}

typedef int (*cvmx_import_config_t)(void);
cvmx_import_config_t cvmx_import_app_config;

int __cvmx_helper_init_port_config_data_local(void)
{
	int rv = 0;
	int dbg = 0;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		if (cvmx_import_app_config) {
			rv = (*cvmx_import_app_config)();
			if (rv != 0) {
				debug("failed to import config\n");
				return -1;
			}
		}

		cvmx_helper_cfg_init_pko_port_map();
		__cvmx_helper_cfg_init_ipd2pko_cache();
	} else {
		if (cvmx_import_app_config) {
			rv = (*cvmx_import_app_config)();
			if (rv != 0) {
				debug("failed to import config\n");
				return -1;
			}
		}
	}
	if (dbg) {
		cvmx_helper_cfg_show_cfg();
		cvmx_pko_queue_show();
	}
	return rv;
}

/*
 * This call is made from the Linux octeon_ethernet driver
 * to set up the PKO with a specific queue count and
 * internal port count configuration.
 */
int cvmx_pko_alloc_iport_and_queues(int interface, int port, int port_cnt, int queue_cnt)
{
	int rv, p, port_start, cnt;

	if (dbg)
		debug("%s: intf %d/%d pcnt %d qcnt %d\n", __func__, interface, port, port_cnt,
		      queue_cnt);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		rv = cvmx_pko_internal_ports_alloc(interface, port, port_cnt);
		if (rv < 0) {
			printf("ERROR: %s: failed to allocate internal ports for interface=%d port=%d cnt=%d\n",
			       __func__, interface, port, port_cnt);
			return -1;
		}
		port_start = __cvmx_helper_cfg_pko_port_base(interface, port);
		cnt = __cvmx_helper_cfg_pko_port_num(interface, port);
	} else {
		port_start = cvmx_helper_get_ipd_port(interface, port);
		cnt = 1;
	}

	for (p = port_start; p < port_start + cnt; p++) {
		rv = cvmx_pko_queue_alloc(p, queue_cnt);
		if (rv < 0) {
			printf("ERROR: %s: failed to allocate queues for port=%d cnt=%d\n",
			       __func__, p, queue_cnt);
			return -1;
		}
	}
	return 0;
}
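
/*
 * Example (a sketch of the octeon_ethernet use case mentioned above,
 * with made-up counts): reserve two internal PKO ports, each with four
 * queues, for port 1 of interface 0:
 *
 *	if (cvmx_pko_alloc_iport_and_queues(0, 1, 2, 4) != 0)
 *		printf("PKO iport/queue allocation failed\n");
 */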

static void cvmx_init_port_cfg(void)
{
	int node, i, j;

	if (port_cfg_data_initialized)
		return;

	for (node = 0; node < CVMX_MAX_NODES; node++) {
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			for (j = 0; j < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; j++) {
				struct cvmx_cfg_port_param *pcfg;
				struct cvmx_srio_port_param *sr;

				pcfg = &cvmx_cfg_port[node][i][j];
				memset(pcfg, 0, sizeof(*pcfg));

				pcfg->port_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->phy_fdt_node = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->phy_info = NULL;
				pcfg->ccpp_pknd = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->ccpp_bpid = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->ccpp_pko_port_base = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->ccpp_pko_num_ports = CVMX_HELPER_CFG_INVALID_VALUE;
				pcfg->agl_rx_clk_skew = 0;
				pcfg->valid = true;
				pcfg->sgmii_phy_mode = false;
				pcfg->sgmii_1000x_mode = false;
				pcfg->agl_rx_clk_delay_bypass = false;
				pcfg->force_link_up = false;
				pcfg->disable_an = false;
				pcfg->link_down_pwr_dn = false;
				pcfg->phy_present = false;
				pcfg->tx_clk_delay_bypass = false;
				pcfg->rgmii_tx_clk_delay = 0;
				pcfg->enable_fec = false;
				sr = &pcfg->srio_short;
				sr->srio_rx_ctle_agc_override = false;
				sr->srio_rx_ctle_zero = 0x6;
				sr->srio_rx_agc_pre_ctle = 0x5;
				sr->srio_rx_agc_post_ctle = 0x4;
				sr->srio_tx_swing_override = false;
				sr->srio_tx_swing = 0x7;
				sr->srio_tx_premptap_override = false;
				sr->srio_tx_premptap_pre = 0;
				sr->srio_tx_premptap_post = 0xF;
				sr->srio_tx_gain_override = false;
				sr->srio_tx_gain = 0x3;
				sr->srio_tx_vboost_override = 0;
				sr->srio_tx_vboost = true;
				sr = &pcfg->srio_long;
				sr->srio_rx_ctle_agc_override = false;
				sr->srio_rx_ctle_zero = 0x6;
				sr->srio_rx_agc_pre_ctle = 0x5;
				sr->srio_rx_agc_post_ctle = 0x4;
				sr->srio_tx_swing_override = false;
				sr->srio_tx_swing = 0x7;
				sr->srio_tx_premptap_override = false;
				sr->srio_tx_premptap_pre = 0;
				sr->srio_tx_premptap_post = 0xF;
				sr->srio_tx_gain_override = false;
				sr->srio_tx_gain = 0x3;
				sr->srio_tx_vboost_override = 0;
				sr->srio_tx_vboost = true;
				pcfg->agl_refclk_sel = 0;
				pcfg->sfp_of_offset = -1;
				pcfg->vsc7224_chan = NULL;
			}
		}
	}
	port_cfg_data_initialized = true;
}

int __cvmx_helper_init_port_config_data(int node)
{
	int rv = 0;
	int i, j, n;
	int num_interfaces, interface;
	int pknd = 0, bpid = 0;
	const int use_static_config = 1;

	if (dbg)
		printf("%s:\n", __func__);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		/* PKO3: only the BPID and PKND need to be set up here,
		 * while the rest of the PKO3 init is done in cvmx-helper-pko3.c
		 */
		pknd = 0;
		bpid = 0;
		for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
			int xiface = cvmx_helper_node_interface_to_xiface(node, i);

			n = cvmx_helper_interface_enumerate(xiface);
			/*
			 * Assign 8 pknds to the ILK interface; these pknds
			 * will be distributed among the channels configured
			 */
			if (cvmx_helper_interface_get_mode(xiface) ==
			    CVMX_HELPER_INTERFACE_MODE_ILK) {
				if (n > 8)
					n = 8;
			}
			if (cvmx_helper_interface_get_mode(xiface) !=
			    CVMX_HELPER_INTERFACE_MODE_NPI) {
				for (j = 0; j < n; j++) {
					struct cvmx_cfg_port_param *pcfg;

					pcfg = &cvmx_cfg_port[node][i][j];
					pcfg->ccpp_pknd = pknd++;
					pcfg->ccpp_bpid = bpid++;
				}
			} else {
				for (j = 0; j < n; j++) {
					if (j == n / cvmx_npi_max_pknds) {
						pknd++;
						bpid++;
					}
					cvmx_cfg_port[node][i][j].ccpp_pknd = pknd;
					cvmx_cfg_port[node][i][j].ccpp_bpid = bpid;
				}
				pknd++;
				bpid++;
			}
		} /* for i=0 */
		cvmx_helper_cfg_assert(pknd <= CVMX_HELPER_CFG_MAX_PIP_PKND);
		cvmx_helper_cfg_assert(bpid <= CVMX_HELPER_CFG_MAX_PIP_BPID);
	} else if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		if (use_static_config)
			cvmx_helper_cfg_init_pko_iports_and_queues_using_static_config();

		/* Initialize pknd and bpid */
		for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
			n = cvmx_helper_interface_enumerate(i);
			for (j = 0; j < n; j++) {
				cvmx_cfg_port[0][i][j].ccpp_pknd = pknd++;
				cvmx_cfg_port[0][i][j].ccpp_bpid = bpid++;
			}
		}
		cvmx_helper_cfg_assert(pknd <= CVMX_HELPER_CFG_MAX_PIP_PKND);
		cvmx_helper_cfg_assert(bpid <= CVMX_HELPER_CFG_MAX_PIP_BPID);
	} else {
		if (use_static_config)
			cvmx_pko_queue_init_from_cvmx_config_non_pknd();
	}

	/* The remainder is not used for PKO3 */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		return 0;

	/* Init ports and queues which are not initialized yet */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = __cvmx_helper_early_ports_on_interface(interface);
		int port, port_base, queue;

		for (port = 0; port < num_ports; port++) {
			bool init_req = false;

			if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
				port_base = __cvmx_helper_cfg_pko_port_base(interface, port);
				if (port_base == CVMX_HELPER_CFG_INVALID_VALUE)
					init_req = true;
			} else {
				port_base = cvmx_helper_get_ipd_port(interface, port);
				queue = __cvmx_helper_cfg_pko_queue_base(port_base);
				if (queue == CVMX_HELPER_CFG_INVALID_VALUE)
					init_req = true;
			}

			if (init_req) {
				rv = cvmx_pko_alloc_iport_and_queues(interface, port, 1, 1);
				if (rv < 0) {
					debug("cvmx_pko_alloc_iport_and_queues failed.\n");
					return rv;
				}
			}
		}
	}

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		cvmx_helper_cfg_init_pko_port_map();
		__cvmx_helper_cfg_init_ipd2pko_cache();
	}

	if (dbg) {
		cvmx_helper_cfg_show_cfg();
		cvmx_pko_queue_show();
	}
	return rv;
}

/**
 * @INTERNAL
 * Store the FDT node offset in the device tree of a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param node_offset node offset to store
 */
void cvmx_helper_set_port_fdt_node_offset(int xiface, int index, int node_offset)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node = node_offset;
}

/**
 * @INTERNAL
 * Return the FDT node offset in the device tree of a port
 *
 * @param xiface node and interface
 * @param index port index
 * Return: node offset of port or -1 if invalid
 */
int cvmx_helper_get_port_fdt_node_offset(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].port_fdt_node;
}

/**
 * Search for a port based on its FDT node offset
 *
 * @param of_offset Node offset of port to search for
 * @param[out] xiface xinterface of match
 * @param[out] index port index of match
 *
 * Return: 0 if found, -1 if not found
 */
int cvmx_helper_cfg_get_xiface_index_by_fdt_node_offset(int of_offset, int *xiface, int *index)
{
	int iface;
	int i;
	int node;
	struct cvmx_cfg_port_param *pcfg = NULL;

	*xiface = -1;
	*index = -1;

	for (node = 0; node < CVMX_MAX_NODES; node++) {
		for (iface = 0; iface < CVMX_HELPER_MAX_IFACE; iface++) {
			for (i = 0; i < CVMX_HELPER_CFG_MAX_PORT_PER_IFACE; i++) {
				pcfg = &cvmx_cfg_port[node][iface][i];
				if (pcfg->valid && pcfg->port_fdt_node == of_offset) {
					*xiface = cvmx_helper_node_interface_to_xiface(node, iface);
					*index = i;
					return 0;
				}
			}
		}
	}
	return -1;
}

/**
 * @INTERNAL
 * Store the FDT node offset in the device tree of a phy
 *
 * @param xiface node and interface
 * @param index port index
 * @param node_offset node offset to store
 */
void cvmx_helper_set_phy_fdt_node_offset(int xiface, int index, int node_offset)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].phy_fdt_node = node_offset;
}

/**
 * @INTERNAL
 * Return the FDT node offset in the device tree of a phy
 *
 * @param xiface node and interface
 * @param index port index
 * Return: node offset of phy or -1 if invalid
 */
int cvmx_helper_get_phy_fdt_node_offset(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].phy_fdt_node;
}

/**
 * @INTERNAL
 * Override default autonegotiation for a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param enable true to enable autonegotiation, false to force full
 *		 duplex, full speed.
 */
void cvmx_helper_set_port_autonegotiation(int xiface, int index, bool enable)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].disable_an = !enable;
}

/**
 * @INTERNAL
 * Returns if autonegotiation is enabled or not.
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: 0 if autonegotiation is disabled, 1 if enabled.
 */
bool cvmx_helper_get_port_autonegotiation(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return !cvmx_cfg_port[xi.node][xi.interface][index].disable_an;
}

/**
 * @INTERNAL
 * Override default forward error correction for a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param enable true to enable fec, false to disable it
 */
void cvmx_helper_set_port_fec(int xiface, int index, bool enable)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].enable_fec = enable;
}

/**
 * @INTERNAL
 * Returns if forward error correction is enabled or not.
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: false if fec is disabled, true if enabled.
 */
bool cvmx_helper_get_port_fec(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].enable_fec;
}

/**
 * @INTERNAL
 * Configure the SRIO RX interface AGC settings for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param ctle_zero_override true to override the RX equalizer peaking control
 * @param ctle_zero RX equalizer peaking control (default 0x6)
 * @param agc_override true to put AGC in manual mode
 * @param agc_pre_ctle AGC pre-CTLE gain (default 0x5)
 * @param agc_post_ctle AGC post-CTLE gain (default 0x4)
 *
 * NOTE: This must be called before SRIO is initialized to take effect
 */
void cvmx_helper_set_srio_rx(int xiface, int index, bool long_run, bool ctle_zero_override,
			     u8 ctle_zero, bool agc_override, uint8_t agc_pre_ctle,
			     uint8_t agc_post_ctle)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	sr->srio_rx_ctle_zero_override = ctle_zero_override;
	sr->srio_rx_ctle_zero = ctle_zero;
	sr->srio_rx_ctle_agc_override = agc_override;
	sr->srio_rx_agc_pre_ctle = agc_pre_ctle;
	sr->srio_rx_agc_post_ctle = agc_post_ctle;
}

/**
 * @INTERNAL
 * Get the SRIO RX interface AGC settings for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param[out] ctle_zero_override true if the RX equalizer peaking control is overridden
 * @param[out] ctle_zero RX equalizer peaking control (default 0x6)
 * @param[out] agc_override true to put AGC in manual mode
 * @param[out] agc_pre_ctle AGC pre-CTLE gain (default 0x5)
 * @param[out] agc_post_ctle AGC post-CTLE gain (default 0x4)
 */
void cvmx_helper_get_srio_rx(int xiface, int index, bool long_run, bool *ctle_zero_override,
			     u8 *ctle_zero, bool *agc_override, uint8_t *agc_pre_ctle,
			     uint8_t *agc_post_ctle)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	if (ctle_zero_override)
		*ctle_zero_override = sr->srio_rx_ctle_zero_override;
	if (ctle_zero)
		*ctle_zero = sr->srio_rx_ctle_zero;
	if (agc_override)
		*agc_override = sr->srio_rx_ctle_agc_override;
	if (agc_pre_ctle)
		*agc_pre_ctle = sr->srio_rx_agc_pre_ctle;
	if (agc_post_ctle)
		*agc_post_ctle = sr->srio_rx_agc_post_ctle;
}

/**
 * @INTERNAL
 * Configure the SRIO TX interface for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param tx_swing tx swing value to use (default 0x7), -1 to not
 *		   override.
 * @param tx_gain PCS SDS TX gain (default 0x3), -1 to not
 *		  override
 * @param tx_premptap_override true to override preemphasis control
 * @param tx_premptap_pre preemphasis pre tap value (default 0x0)
 * @param tx_premptap_post preemphasis post tap value (default 0xF)
 * @param tx_vboost vboost enable (1 = enable, -1 = don't override)
 *		    hardware default is 1.
 *
 * NOTE: This must be called before SRIO is initialized to take effect
 */
void cvmx_helper_set_srio_tx(int xiface, int index, bool long_run, int tx_swing, int tx_gain,
			     bool tx_premptap_override, uint8_t tx_premptap_pre,
			     u8 tx_premptap_post, int tx_vboost)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	sr->srio_tx_swing_override = (tx_swing != -1);
	sr->srio_tx_swing = tx_swing != -1 ? tx_swing : 0x7;
	sr->srio_tx_gain_override = (tx_gain != -1);
	sr->srio_tx_gain = tx_gain != -1 ? tx_gain : 0x3;
	sr->srio_tx_premptap_override = tx_premptap_override;
	sr->srio_tx_premptap_pre = tx_premptap_override ? tx_premptap_pre : 0;
	sr->srio_tx_premptap_post = tx_premptap_override ? tx_premptap_post : 0xF;
	sr->srio_tx_vboost_override = tx_vboost != -1;
	sr->srio_tx_vboost = (tx_vboost != -1) ? tx_vboost : 1;
}
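
/*
 * Example (illustrative values): override only the TX swing in the
 * long-run settings of lane 0, passing -1/false for the remaining
 * parameters so they keep their hardware defaults:
 *
 *	cvmx_helper_set_srio_tx(xiface, 0, true, 0x9, -1, false, 0, 0xF, -1);
 */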

/**
 * @INTERNAL
 * Get the SRIO TX interface settings for host mode
 *
 * @param xiface node and interface
 * @param index lane
 * @param long_run true for long run, false for short run
 * @param[out] tx_swing_override true to override pcs_sds_txX_swing
 * @param[out] tx_swing tx swing value to use (default 0x7)
 * @param[out] tx_gain_override true to override default gain
 * @param[out] tx_gain PCS SDS TX gain (default 0x3)
 * @param[out] tx_premptap_override true to override preemphasis control
 * @param[out] tx_premptap_pre preemphasis pre tap value (default 0x0)
 * @param[out] tx_premptap_post preemphasis post tap value (default 0xF)
 * @param[out] tx_vboost_override override vboost setting
 * @param[out] tx_vboost vboost enable (default true)
 */
void cvmx_helper_get_srio_tx(int xiface, int index, bool long_run, bool *tx_swing_override,
			     u8 *tx_swing, bool *tx_gain_override, uint8_t *tx_gain,
			     bool *tx_premptap_override, uint8_t *tx_premptap_pre,
			     u8 *tx_premptap_post, bool *tx_vboost_override, bool *tx_vboost)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_cfg_port_param *pcfg = &cvmx_cfg_port[xi.node][xi.interface][index];
	struct cvmx_srio_port_param *sr = long_run ? &pcfg->srio_long : &pcfg->srio_short;

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();

	if (tx_swing_override)
		*tx_swing_override = sr->srio_tx_swing_override;
	if (tx_swing)
		*tx_swing = sr->srio_tx_swing;
	if (tx_gain_override)
		*tx_gain_override = sr->srio_tx_gain_override;
	if (tx_gain)
		*tx_gain = sr->srio_tx_gain;
	if (tx_premptap_override)
		*tx_premptap_override = sr->srio_tx_premptap_override;
	if (tx_premptap_pre)
		*tx_premptap_pre = sr->srio_tx_premptap_pre;
	if (tx_premptap_post)
		*tx_premptap_post = sr->srio_tx_premptap_post;
	if (tx_vboost_override)
		*tx_vboost_override = sr->srio_tx_vboost_override;
	if (tx_vboost)
		*tx_vboost = sr->srio_tx_vboost;
}

/**
 * @INTERNAL
 * Sets the PHY info data structure
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] phy_info phy information data structure pointer
 */
void cvmx_helper_set_port_phy_info(int xiface, int index, struct cvmx_phy_info *phy_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].phy_info = phy_info;
}

/**
 * @INTERNAL
 * Returns the PHY information data structure for a port
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to PHY information data structure or NULL if not set
 */
struct cvmx_phy_info *cvmx_helper_get_port_phy_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].phy_info;
}

/**
 * @INTERNAL
 * Returns a pointer to the PHY LED configuration (if local GPIOs drive them)
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to the PHY LED information data structure or NULL if not
 *	   present
 */
struct cvmx_phy_gpio_leds *cvmx_helper_get_port_phy_leds(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds;
}

/**
 * @INTERNAL
 * Sets a pointer to the PHY LED configuration (if local GPIOs drive them)
 *
 * @param xiface node and interface
 * @param index port index
 * @param leds pointer to led data structure
 */
void cvmx_helper_set_port_phy_leds(int xiface, int index, struct cvmx_phy_gpio_leds *leds)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].gpio_leds = leds;
}

/**
 * @INTERNAL
 * Sets the RGMII TX clock bypass and delay value
 *
 * @param xiface node and interface
 * @param index port index
 * @param bypass Set true to enable the clock bypass and false
 *		 to sync clock and data synchronously.
 *		 Default is false.
 * @param clk_delay Delay value to skew TXC from TXD
 */
void cvmx_helper_cfg_set_rgmii_tx_clk_delay(int xiface, int index, bool bypass, int clk_delay)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].tx_clk_delay_bypass = bypass;
	cvmx_cfg_port[xi.node][xi.interface][index].rgmii_tx_clk_delay = clk_delay;
}

/**
 * @INTERNAL
 * Gets the RGMII TX clock bypass and delay value
 *
 * @param xiface node and interface
 * @param index port index
 * @param bypass Set true to enable the clock bypass and false
 *		 to sync clock and data synchronously.
 *		 Default is false.
 * @param clk_delay Delay value to skew TXC from TXD, default is 0.
 */
void cvmx_helper_cfg_get_rgmii_tx_clk_delay(int xiface, int index, bool *bypass, int *clk_delay)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	*bypass = cvmx_cfg_port[xi.node][xi.interface][index].tx_clk_delay_bypass;
	*clk_delay = cvmx_cfg_port[xi.node][xi.interface][index].rgmii_tx_clk_delay;
}

/**
 * @INTERNAL
 * Retrieve the SFP node offset in the device tree
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: offset in device tree or -1 if error or not defined.
 */
int cvmx_helper_cfg_get_sfp_fdt_offset(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset;
}

/**
 * @INTERNAL
 * Sets the SFP node offset
 *
 * @param xiface node and interface
 * @param index port index
 * @param sfp_of_offset Offset of SFP node in device tree
 */
void cvmx_helper_cfg_set_sfp_fdt_offset(int xiface, int index, int sfp_of_offset)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].sfp_of_offset = sfp_of_offset;
}

/**
 * Get data structure defining the Microsemi VSC7224 channel info
 * or NULL if not present
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to vsc7224 data structure or NULL if not present
 */
struct cvmx_vsc7224_chan *cvmx_helper_cfg_get_vsc7224_chan_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan;
}

/**
 * Sets the Microsemi VSC7224 channel info data structure
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] vsc7224_chan_info Microsemi VSC7224 channel data structure
 */
void cvmx_helper_cfg_set_vsc7224_chan_info(int xiface, int index,
					   struct cvmx_vsc7224_chan *vsc7224_chan_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].vsc7224_chan = vsc7224_chan_info;
}

/**
 * Get data structure defining the Avago AVSP5410 phy info
 * or NULL if not present
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to avsp5410 data structure or NULL if not present
 */
struct cvmx_avsp5410 *cvmx_helper_cfg_get_avsp5410_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].avsp5410;
}

/**
 * Sets the Avago AVSP5410 phy info data structure
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] avsp5410_info Avago AVSP5410 data structure
 */
void cvmx_helper_cfg_set_avsp5410_info(int xiface, int index, struct cvmx_avsp5410 *avsp5410_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].avsp5410 = avsp5410_info;
}

/**
 * Gets the SFP data associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to SFP data structure or NULL if none
 */
struct cvmx_fdt_sfp_info *cvmx_helper_cfg_get_sfp_info(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].sfp_info;
}

/**
 * Sets the SFP data associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] sfp_info port SFP data or NULL for none
 */
void cvmx_helper_cfg_set_sfp_info(int xiface, int index, struct cvmx_fdt_sfp_info *sfp_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].sfp_info = sfp_info;
}

/**
 * Returns a pointer to the phy device associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 *
 * Return: pointer to phy device or NULL if none
 */
struct phy_device *cvmx_helper_cfg_get_phy_device(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	return cvmx_cfg_port[xi.node][xi.interface][index].phydev;
}

/**
 * Sets the phy device associated with a port
 *
 * @param xiface node and interface
 * @param index port index
 * @param[in] phydev phy device to associate
 */
void cvmx_helper_cfg_set_phy_device(int xiface, int index, struct phy_device *phydev)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!port_cfg_data_initialized)
		cvmx_init_port_cfg();
	cvmx_cfg_port[xi.node][xi.interface][index].phydev = phydev;
}