// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * Functions for ILK initialization, configuration,
 * and monitoring.
 */

#include <time.h>
#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

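/**
 * @INTERNAL
 * Return the number of channels configured for an ILK interface.
 *
 * @param xiface Interface to query
 *
 * @return Number of channels
 */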
int __cvmx_helper_ilk_enumerate(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	xi.interface -= CVMX_ILK_GBL_BASE();
	return cvmx_ilk_chans[xi.node][xi.interface];
}

/**
 * @INTERNAL
 * Initialize all tx calendar entries to the xoff state.
 * Initialize all rx calendar entries to the xon state. The rx calendar entries
 * must be in the xon state to allow new pko pipe assignments. If a calendar
 * entry is assigned a different pko pipe while in the xoff state, the old pko
 * pipe will stay in the xoff state even when no longer used by ilk.
 *
 * @param intf Interface whose calendar entries are to be initialized.
 */
static void __cvmx_ilk_clear_cal_cn78xx(int intf)
{
	cvmx_ilk_txx_cal_entryx_t tx_entry;
	cvmx_ilk_rxx_cal_entryx_t rx_entry;
	int i;
	int node = (intf >> 4) & 0xf;
	int interface = (intf & 0xf);

	/* Initialize all tx calendar entries to xoff */
	tx_entry.u64 = 0;
	tx_entry.s.ctl = XOFF;
	for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
		csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(i, interface),
			    tx_entry.u64);
	}

	/* Initialize all rx calendar entries to xon */
	rx_entry.u64 = 0;
	rx_entry.s.ctl = XON;
	for (i = 0; i < CVMX_ILK_MAX_CAL; i++) {
		csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(i, interface),
			    rx_entry.u64);
	}
}

/**
 * @INTERNAL
 * Initialize all tx calendar entries to the xoff state.
 * Initialize all rx calendar entries to the xon state. The rx calendar entries
 * must be in the xon state to allow new pko pipe assignments. If a calendar
 * entry is assigned a different pko pipe while in the xoff state, the old pko
 * pipe will stay in the xoff state even when no longer used by ilk.
 *
 * @param interface Interface whose calendar entries are to be initialized.
 */
static void __cvmx_ilk_clear_cal_cn68xx(int interface)
{
	cvmx_ilk_txx_idx_cal_t tx_idx;
	cvmx_ilk_txx_mem_cal0_t tx_cal0;
	cvmx_ilk_txx_mem_cal1_t tx_cal1;
	cvmx_ilk_rxx_idx_cal_t rx_idx;
	cvmx_ilk_rxx_mem_cal0_t rx_cal0;
	cvmx_ilk_rxx_mem_cal1_t rx_cal1;
	int i;

	/*
	 * First we initialize the tx calendar starting from entry 0,
	 * incrementing the entry with every write.
	 */
	tx_idx.u64 = 0;
	tx_idx.s.inc = 1;
	csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);

	/* Set state to xoff for all entries */
	tx_cal0.u64 = 0;
	tx_cal0.s.entry_ctl0 = XOFF;
	tx_cal0.s.entry_ctl1 = XOFF;
	tx_cal0.s.entry_ctl2 = XOFF;
	tx_cal0.s.entry_ctl3 = XOFF;

	tx_cal1.u64 = 0;
	tx_cal1.s.entry_ctl4 = XOFF;
	tx_cal1.s.entry_ctl5 = XOFF;
	tx_cal1.s.entry_ctl6 = XOFF;
	tx_cal1.s.entry_ctl7 = XOFF;

	/* Write all 288 entries, 8 entries per iteration */
	for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
		csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
		csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
	}

	/*
	 * Next we initialize the rx calendar starting from entry 0,
	 * incrementing the entry with every write.
	 */
	rx_idx.u64 = 0;
	rx_idx.s.inc = 1;
	csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);

	/* Set state to xon for all entries */
	rx_cal0.u64 = 0;
	rx_cal0.s.entry_ctl0 = XON;
	rx_cal0.s.entry_ctl1 = XON;
	rx_cal0.s.entry_ctl2 = XON;
	rx_cal0.s.entry_ctl3 = XON;

	rx_cal1.u64 = 0;
	rx_cal1.s.entry_ctl4 = XON;
	rx_cal1.s.entry_ctl5 = XON;
	rx_cal1.s.entry_ctl6 = XON;
	rx_cal1.s.entry_ctl7 = XON;

	/* Write all 288 entries, 8 entries per iteration */
	for (i = 0; i < CVMX_ILK_MAX_CAL_IDX; i++) {
		csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
		csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
	}
}

/**
 * @INTERNAL
 * Initialize all calendar entries.
 *
 * @param interface Interface whose calendar is to be initialized.
 */
void __cvmx_ilk_clear_cal(int interface)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		__cvmx_ilk_clear_cal_cn68xx(interface);
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		__cvmx_ilk_clear_cal_cn78xx(interface);
}

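/**
 * @INTERNAL
 * CN68XX-specific handler for __cvmx_ilk_write_tx_cal_entry(): map the
 * channel to its calendar entry and program the entry with the bpid.
 *
 * @param interface Interface the channel belongs to
 * @param channel   Channel whose calendar entry is to be updated
 * @param bpid      Backpressure ID assigned to the channel
 */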
void __cvmx_ilk_write_tx_cal_entry_cn68xx(int interface, int channel,
					  unsigned char bpid)
{
	cvmx_ilk_txx_idx_cal_t tx_idx;
	cvmx_ilk_txx_mem_cal0_t tx_cal0;
	cvmx_ilk_txx_mem_cal1_t tx_cal1;
	int entry;
	int window;
	int window_entry;

	/*
	 * The calendar has 288 entries. Each calendar entry represents a
	 * channel's flow control state or the link flow control state.
	 * Starting with the first entry, every sixteenth entry is used for the
	 * link flow control state. The other 15 entries are used for the
	 * channels' flow control state:
	 * entry 0  ----> link flow control state
	 * entry 1  ----> channel 0 flow control state
	 * entry 2  ----> channel 1 flow control state
	 * ...
	 * entry 15 ----> channel 14 flow control state
	 * entry 16 ----> link flow control state
	 * entry 17 ----> channel 15 flow control state
	 *
	 * Also, the calendar is accessed via windows into it. Each window maps
	 * to 8 entries.
	 */
	entry = 1 + channel + (channel / 15);
	window = entry / 8;
	window_entry = entry % 8;
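	/*
	 * For example, channel 22 skips one extra link entry
	 * (channel / 15 == 1): entry = 1 + 22 + 1 = 24, which falls in
	 * window 3 (entries 24-31) at window offset 0.
	 */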

	/* Indicate the window we need to access */
	tx_idx.u64 = 0;
	tx_idx.s.index = window;
	csr_wr(CVMX_ILK_TXX_IDX_CAL(interface), tx_idx.u64);

	/* Get the window's current value */
	tx_cal0.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL0(interface));
	tx_cal1.u64 = csr_rd(CVMX_ILK_TXX_MEM_CAL1(interface));

	/* Force every sixteenth entry as link flow control state */
	if ((window & 1) == 0)
		tx_cal0.s.entry_ctl0 = LINK;

	/* Update the entry */
	switch (window_entry) {
	case 0:
		tx_cal0.s.entry_ctl0 = 0;
		tx_cal0.s.bpid0 = bpid;
		break;
	case 1:
		tx_cal0.s.entry_ctl1 = 0;
		tx_cal0.s.bpid1 = bpid;
		break;
	case 2:
		tx_cal0.s.entry_ctl2 = 0;
		tx_cal0.s.bpid2 = bpid;
		break;
	case 3:
		tx_cal0.s.entry_ctl3 = 0;
		tx_cal0.s.bpid3 = bpid;
		break;
	case 4:
		tx_cal1.s.entry_ctl4 = 0;
		tx_cal1.s.bpid4 = bpid;
		break;
	case 5:
		tx_cal1.s.entry_ctl5 = 0;
		tx_cal1.s.bpid5 = bpid;
		break;
	case 6:
		tx_cal1.s.entry_ctl6 = 0;
		tx_cal1.s.bpid6 = bpid;
		break;
	case 7:
		tx_cal1.s.entry_ctl7 = 0;
		tx_cal1.s.bpid7 = bpid;
		break;
	}

	/* Write the window's new value */
	csr_wr(CVMX_ILK_TXX_MEM_CAL0(interface), tx_cal0.u64);
	csr_wr(CVMX_ILK_TXX_MEM_CAL1(interface), tx_cal1.u64);
}

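/**
 * @INTERNAL
 * CN78XX-specific handler for __cvmx_ilk_write_tx_cal_entry(). On this
 * model the calendar entry holds the channel number directly; the bpid
 * argument is unused.
 *
 * @param intf    Node and interface the channel belongs to
 * @param channel Channel whose calendar entry is to be updated
 * @param bpid    Unused on this model
 */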
void __cvmx_ilk_write_tx_cal_entry_cn78xx(int intf, int channel,
					  unsigned char bpid)
{
	cvmx_ilk_txx_cal_entryx_t tx_cal;
	int calender_16_block = channel / 15;
	int calender_16_index = channel % 15 + 1;
	int index = calender_16_block * 16 + calender_16_index;
	int node = (intf >> 4) & 0xf;
	int interface = intf & 0xf;
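
	/*
	 * Each block of 16 calendar entries holds a link entry followed by
	 * 15 channel entries, e.g. channel 22 -> block 1, entry 16 + 8 = 24.
	 */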

	/* Program the link entry when writing a block's first channel */
	if (calender_16_index == 1) {
		tx_cal.u64 = 0;
		tx_cal.s.ctl = 1;
		csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index - 1, interface),
			    tx_cal.u64);
	}
	tx_cal.u64 = 0;
	tx_cal.s.ctl = 0;
	tx_cal.s.channel = channel;
	csr_wr_node(node, CVMX_ILK_TXX_CAL_ENTRYX(index, interface),
		    tx_cal.u64);
}

/**
 * @INTERNAL
 * Set up the channel's tx calendar entry.
 *
 * @param interface Interface the channel belongs to
 * @param channel   Channel whose calendar entry is to be updated
 * @param bpid      Bpid assigned to the channel
 */
void __cvmx_ilk_write_tx_cal_entry(int interface, int channel,
				   unsigned char bpid)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		__cvmx_ilk_write_tx_cal_entry_cn68xx(interface, channel, bpid);
	else
		__cvmx_ilk_write_tx_cal_entry_cn78xx(interface, channel, bpid);
}

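/**
 * @INTERNAL
 * CN78XX-specific handler for __cvmx_ilk_write_rx_cal_entry(). As on the
 * tx side, the calendar entry holds the channel number directly; the
 * bpid argument is unused.
 *
 * @param intf    Node and interface the channel belongs to
 * @param channel Channel whose calendar entry is to be updated
 * @param bpid    Unused on this model
 */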
void __cvmx_ilk_write_rx_cal_entry_cn78xx(int intf, int channel,
					  unsigned char bpid)
{
	cvmx_ilk_rxx_cal_entryx_t rx_cal;
	int calender_16_block = channel / 15;
	int calender_16_index = channel % 15 + 1;
	int index = calender_16_block * 16 + calender_16_index;
	int node = (intf >> 4) & 0xf;
	int interface = intf & 0xf;
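
	/* Same calendar indexing as the tx path */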

	/* Program the link entry when writing a block's first channel */
	if (calender_16_index == 1) {
		rx_cal.u64 = 0;
		rx_cal.s.ctl = 1;
		csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index - 1, interface),
			    rx_cal.u64);
	}
	rx_cal.u64 = 0;
	rx_cal.s.ctl = 0;
	rx_cal.s.channel = channel;
	csr_wr_node(node, CVMX_ILK_RXX_CAL_ENTRYX(index, interface),
		    rx_cal.u64);
}

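/**
 * @INTERNAL
 * CN68XX-specific handler for __cvmx_ilk_write_rx_cal_entry(): map the
 * channel to its calendar entry and program the entry with the pko pipe.
 *
 * @param interface Interface the channel belongs to
 * @param channel   Channel whose calendar entry is to be updated
 * @param pipe      PKO pipe assigned to the channel
 */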
void __cvmx_ilk_write_rx_cal_entry_cn68xx(int interface, int channel,
					  unsigned char pipe)
{
	cvmx_ilk_rxx_idx_cal_t rx_idx;
	cvmx_ilk_rxx_mem_cal0_t rx_cal0;
	cvmx_ilk_rxx_mem_cal1_t rx_cal1;
	int entry;
	int window;
	int window_entry;

	/*
	 * The calendar has 288 entries. Each calendar entry represents a
	 * channel's flow control state or the link flow control state.
	 * Starting with the first entry, every sixteenth entry is used for the
	 * link flow control state. The other 15 entries are used for the
	 * channels' flow control state:
	 * entry 0  ----> link flow control state
	 * entry 1  ----> channel 0 flow control state
	 * entry 2  ----> channel 1 flow control state
	 * ...
	 * entry 15 ----> channel 14 flow control state
	 * entry 16 ----> link flow control state
	 * entry 17 ----> channel 15 flow control state
	 *
	 * Also, the calendar is accessed via windows into it. Each window maps
	 * to 8 entries.
	 */
	entry = 1 + channel + (channel / 15);
	window = entry / 8;
	window_entry = entry % 8;

	/* Indicate the window we need to access */
	rx_idx.u64 = 0;
	rx_idx.s.index = window;
	csr_wr(CVMX_ILK_RXX_IDX_CAL(interface), rx_idx.u64);

	/* Get the window's current value */
	rx_cal0.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL0(interface));
	rx_cal1.u64 = csr_rd(CVMX_ILK_RXX_MEM_CAL1(interface));

	/* Force every sixteenth entry as link flow control state */
	if ((window & 1) == 0)
		rx_cal0.s.entry_ctl0 = LINK;

	/* Update the entry */
	switch (window_entry) {
	case 0:
		rx_cal0.s.entry_ctl0 = 0;
		rx_cal0.s.port_pipe0 = pipe;
		break;
	case 1:
		rx_cal0.s.entry_ctl1 = 0;
		rx_cal0.s.port_pipe1 = pipe;
		break;
	case 2:
		rx_cal0.s.entry_ctl2 = 0;
		rx_cal0.s.port_pipe2 = pipe;
		break;
	case 3:
		rx_cal0.s.entry_ctl3 = 0;
		rx_cal0.s.port_pipe3 = pipe;
		break;
	case 4:
		rx_cal1.s.entry_ctl4 = 0;
		rx_cal1.s.port_pipe4 = pipe;
		break;
	case 5:
		rx_cal1.s.entry_ctl5 = 0;
		rx_cal1.s.port_pipe5 = pipe;
		break;
	case 6:
		rx_cal1.s.entry_ctl6 = 0;
		rx_cal1.s.port_pipe6 = pipe;
		break;
	case 7:
		rx_cal1.s.entry_ctl7 = 0;
		rx_cal1.s.port_pipe7 = pipe;
		break;
	}

	/* Write the window's new value */
	csr_wr(CVMX_ILK_RXX_MEM_CAL0(interface), rx_cal0.u64);
	csr_wr(CVMX_ILK_RXX_MEM_CAL1(interface), rx_cal1.u64);
}

/**
 * @INTERNAL
 * Set up the channel's rx calendar entry.
 *
 * @param interface Interface the channel belongs to
 * @param channel   Channel whose calendar entry is to be updated
 * @param pipe      PKO pipe assigned to the channel
 */
void __cvmx_ilk_write_rx_cal_entry(int interface, int channel,
				   unsigned char pipe)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		__cvmx_ilk_write_rx_cal_entry_cn68xx(interface, channel, pipe);
	else
		__cvmx_ilk_write_rx_cal_entry_cn78xx(interface, channel, pipe);
}

/**
 * @INTERNAL
 * Probe an ILK interface and determine the number of ports
 * connected to it. The ILK interface should still be down
 * after this call.
 *
 * @param xiface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_ilk_probe(int xiface)
{
	int res = 0;
	int interface;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (!octeon_has_feature(OCTEON_FEATURE_ILK))
		return res;

	interface = xi.interface - CVMX_ILK_GBL_BASE();
	if (interface >= CVMX_NUM_ILK_INTF)
		return 0;

	/* The configuration should be done only once */
	if (cvmx_ilk_get_intf_ena(xiface))
		return cvmx_ilk_chans[xi.node][interface];

	/* Configure lanes and enable the link */
	res = cvmx_ilk_start_interface(((xi.node << 4) | interface),
				       cvmx_ilk_lane_mask[xi.node][interface]);
	if (res < 0)
		return 0;

	res = __cvmx_helper_ilk_enumerate(xiface);

	return res;
}

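/**
 * @INTERNAL
 * CN68XX-specific port initialization: map channels to pko pipes and
 * pkinds, then program the tx and rx flow control calendars.
 *
 * @param xiface Interface to initialize
 *
 * @return Zero on failure, otherwise the result of the rx calendar setup
 */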
static int __cvmx_helper_ilk_init_port_cn68xx(int xiface)
{
	int i, j, res = -1;
	static int pipe_base = 0, pknd_base;
	static cvmx_ilk_pipe_chan_t *pch = NULL, *tmp;
	static cvmx_ilk_chan_pknd_t *chpknd = NULL, *tmp1;
	static cvmx_ilk_cal_entry_t *calent = NULL, *tmp2;
	int enable_rx_cal = 1;
	int interface;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int intf;
	int num_chans;

	interface = xi.interface - CVMX_ILK_GBL_BASE();
	intf = (xi.node << 4) | interface;
	if (interface >= CVMX_NUM_ILK_INTF)
		return 0;

	num_chans = cvmx_ilk_chans[0][interface];

	/* Set up channel to pkind mapping */
	if (pknd_base == 0)
		pknd_base = cvmx_helper_get_pknd(xiface, 0);

	/* Set up the group of pipes available to ilk */
	if (pipe_base == 0)
		pipe_base =
			__cvmx_pko_get_pipe(interface + CVMX_ILK_GBL_BASE(), 0);

	if (pipe_base == -1) {
		pipe_base = 0;
		return 0;
	}

	res = cvmx_ilk_set_pipe(xiface, pipe_base,
				cvmx_ilk_chans[0][interface]);
	if (res < 0)
		return 0;

	/* Set up pipe to channel mapping */
	i = pipe_base;
	if (!pch) {
		pch = (cvmx_ilk_pipe_chan_t *)cvmx_bootmem_alloc(
			num_chans * sizeof(cvmx_ilk_pipe_chan_t),
			sizeof(cvmx_ilk_pipe_chan_t));
		if (!pch)
			return 0;
	}

	memset(pch, 0, num_chans * sizeof(cvmx_ilk_pipe_chan_t));
	tmp = pch;
	for (j = 0; j < num_chans; j++) {
		tmp->pipe = i++;
		tmp->chan = j;
		tmp++;
	}
	res = cvmx_ilk_tx_set_channel(interface, pch,
				      cvmx_ilk_chans[0][interface]);
	if (res < 0) {
		res = 0;
		goto err_free_pch;
	}
	pipe_base += cvmx_ilk_chans[0][interface];
	i = pknd_base;
	if (!chpknd) {
		chpknd = (cvmx_ilk_chan_pknd_t *)cvmx_bootmem_alloc(
			CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
			sizeof(cvmx_ilk_chan_pknd_t));
		if (!chpknd) {
			pipe_base -= cvmx_ilk_chans[xi.node][interface];
			res = 0;
			goto err_free_pch;
		}
	}

	memset(chpknd, 0, CVMX_ILK_MAX_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
	tmp1 = chpknd;
	for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
		tmp1->chan = j;
		tmp1->pknd = i++;
		tmp1++;
	}

	res = cvmx_ilk_rx_set_pknd(xiface, chpknd,
				   cvmx_ilk_chans[xi.node][interface]);
	if (res < 0) {
		pipe_base -= cvmx_ilk_chans[xi.node][interface];
		res = 0;
		goto err_free_chpknd;
	}
	pknd_base += cvmx_ilk_chans[xi.node][interface];

	/* Set up tx calendar */
	if (!calent) {
		calent = (cvmx_ilk_cal_entry_t *)cvmx_bootmem_alloc(
			CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t),
			sizeof(cvmx_ilk_cal_entry_t));
		if (!calent) {
			pipe_base -= cvmx_ilk_chans[xi.node][interface];
			pknd_base -= cvmx_ilk_chans[xi.node][interface];
			res = 0;
			goto err_free_chpknd;
		}
	}

	memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
	tmp1 = chpknd;
	tmp2 = calent;
	for (j = 0; j < cvmx_ilk_chans[xi.node][interface]; j++) {
		tmp2->pipe_bpid = tmp1->pknd;
		tmp2->ent_ctrl = PIPE_BPID;
		tmp1++;
		tmp2++;
	}
	res = cvmx_ilk_cal_setup_tx(intf, cvmx_ilk_chans[xi.node][interface],
				    calent, 1);
	if (res < 0) {
		pipe_base -= cvmx_ilk_chans[xi.node][interface];
		pknd_base -= cvmx_ilk_chans[xi.node][interface];
		res = 0;
		goto err_free_calent;
	}

	/*
	 * Set up rx calendar. The allocated memory can be reused because
	 * the max pkind is always less than the max pipe.
	 */
	memset(calent, 0, CVMX_ILK_MAX_PIPES * sizeof(cvmx_ilk_cal_entry_t));
	tmp = pch;
	tmp2 = calent;
	for (j = 0; j < cvmx_ilk_chans[0][interface]; j++) {
		tmp2->pipe_bpid = tmp->pipe;
		tmp2->ent_ctrl = PIPE_BPID;
		tmp++;
		tmp2++;
	}
	if (cvmx_ilk_use_la_mode(interface, 0))
		enable_rx_cal = cvmx_ilk_la_mode_enable_rx_calendar(interface);
	else
		enable_rx_cal = 1;

	res = cvmx_ilk_cal_setup_rx(intf, cvmx_ilk_chans[xi.node][interface],
				    calent, CVMX_ILK_RX_FIFO_WM, enable_rx_cal);
	if (res < 0) {
		pipe_base -= cvmx_ilk_chans[xi.node][interface];
		pknd_base -= cvmx_ilk_chans[xi.node][interface];
		res = 0;
		goto err_free_calent;
	}
	goto out;

err_free_calent:
	/* no free() for cvmx_bootmem_alloc() */

err_free_chpknd:
	/* no free() for cvmx_bootmem_alloc() */

err_free_pch:
	/* no free() for cvmx_bootmem_alloc() */
out:
	return res;
}

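/**
 * @INTERNAL
 * CN78XX-specific port initialization: program the per-channel pkind and
 * PKI style, set the jabber limit, and configure the flow control
 * calendars.
 *
 * @param xiface Interface to initialize
 *
 * @return Zero on success
 */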
static int __cvmx_helper_ilk_init_port_cn78xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface;
	int intf;

	interface = xi.interface - CVMX_ILK_GBL_BASE();
	intf = (xi.node << 4) | interface;
	if (interface >= CVMX_NUM_ILK_INTF)
		return 0;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		struct cvmx_pki_style_config style_cfg;
		int num_channels = cvmx_ilk_chans[xi.node][interface];
		int index, i;

		for (i = 0; i < num_channels; i++) {
			int pknd;

			index = (i % 8);

			/* Set jabber to allow max sized packets */
			if (i == 0)
				csr_wr_node(xi.node,
					    CVMX_ILK_RXX_JABBER(interface),
					    0xfff8);

			/* Setup PKND */
			pknd = cvmx_helper_get_pknd(xiface, index);
			csr_wr_node(xi.node, CVMX_ILK_RXX_CHAX(i, interface),
				    pknd);
			cvmx_pki_read_style_config(
				0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
			style_cfg.parm_cfg.qpg_port_sh = 0;
			/* 256 channels */
			style_cfg.parm_cfg.qpg_port_msb = 8;
			cvmx_pki_write_style_config(
				0, pknd, CVMX_PKI_CLUSTER_ALL, &style_cfg);
		}

		cvmx_ilk_cal_setup_tx(intf, num_channels, NULL, 1);
		cvmx_ilk_cal_setup_rx(intf, num_channels, NULL,
				      CVMX_ILK_RX_FIFO_WM, 1);
	}
	return 0;
}

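/**
 * @INTERNAL
 * Dispatch to the model-specific port initialization.
 *
 * @param xiface Interface to initialize
 *
 * @return Zero on success
 */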
static int __cvmx_helper_ilk_init_port(int xiface)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return __cvmx_helper_ilk_init_port_cn68xx(xiface);
	else
		return __cvmx_helper_ilk_init_port_cn78xx(xiface);
}

/**
 * @INTERNAL
 * Bring up and enable an ILK interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param xiface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_ilk_enable(int xiface)
{
	if (__cvmx_helper_ilk_init_port(xiface) < 0)
		return -1;

	return cvmx_ilk_enable(xiface);
}

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port, as reported by the ILK
 * link status.
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
{
	cvmx_helper_link_info_t result;
	int xiface = cvmx_helper_get_interface_num(ipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface;
	int retry_count = 0;
	cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
	cvmx_ilk_rxx_int_t ilk_rxx_int;
	int lane_mask = 0;
	int i;
	int node = xi.node;

	result.u64 = 0;
	interface = xi.interface - CVMX_ILK_GBL_BASE();

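	/*
	 * Bring-up sequence, polled on each call until it completes: word
	 * boundary lock first, then lane alignment, then packet reception
	 * is enabled to match the tx side.
	 */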
retry:
	retry_count++;
	if (retry_count > 200)
		goto fail;

	/* Read RX config and status bits */
	ilk_rxx_cfg1.u64 = csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
	ilk_rxx_int.u64 = csr_rd_node(node, CVMX_ILK_RXX_INT(interface));

	if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0) {
		/* (GSER-21957) GSER RX Equalization may make >= 5gbaud non-KR
		 * channel better
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			int qlm, lane_mask;

			for (qlm = 4; qlm < 8; qlm++) {
				lane_mask = 1 << (qlm - 4) * 4;
				if (lane_mask &
				    cvmx_ilk_lane_mask[node][interface]) {
					if (__cvmx_qlm_rx_equalization(
						    node, qlm, -1))
						goto retry;
				}
			}
		}

		/* Clear the boundary lock status bit */
		ilk_rxx_int.u64 = 0;
		ilk_rxx_int.s.word_sync_done = 1;
		csr_wr_node(node, CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);

		/* We need to start looking for word boundary lock */
		ilk_rxx_cfg1.s.rx_bdry_lock_ena =
			cvmx_ilk_lane_mask[node][interface];
		ilk_rxx_cfg1.s.rx_align_ena = 0;
		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
			    ilk_rxx_cfg1.u64);
		//debug("ILK%d: Looking for word boundary lock\n", interface);
		udelay(50);
		goto retry;
	}

	if (ilk_rxx_cfg1.s.rx_align_ena == 0) {
		if (ilk_rxx_int.s.word_sync_done) {
			/* Clear the lane align status bits */
			ilk_rxx_int.u64 = 0;
			ilk_rxx_int.s.lane_align_fail = 1;
			ilk_rxx_int.s.lane_align_done = 1;
			csr_wr_node(node, CVMX_ILK_RXX_INT(interface),
				    ilk_rxx_int.u64);

			ilk_rxx_cfg1.s.rx_align_ena = 1;
			csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
				    ilk_rxx_cfg1.u64);
			//printf("ILK%d: Looking for lane alignment\n", interface);
		}
		udelay(50);
		goto retry;
	}

	if (ilk_rxx_int.s.lane_align_fail) {
		ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
		ilk_rxx_cfg1.s.rx_align_ena = 0;
		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
			    ilk_rxx_cfg1.u64);
		//debug("ILK%d: Lane alignment failed\n", interface);
		goto fail;
	}

	lane_mask = ilk_rxx_cfg1.s.rx_bdry_lock_ena;

	if (ilk_rxx_cfg1.s.pkt_ena == 0 && ilk_rxx_int.s.lane_align_done) {
		cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;

		ilk_txx_cfg1.u64 =
			csr_rd_node(node, CVMX_ILK_TXX_CFG1(interface));
		ilk_rxx_cfg1.u64 =
			csr_rd_node(node, CVMX_ILK_RXX_CFG1(interface));
		ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
			    ilk_rxx_cfg1.u64);

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			/*
			 * Enable rxf_ctl_perr, rxf_lnk0_perr, rxf_lnk1_perr,
			 * pop_empty, push_full.
			 */
			csr_wr(CVMX_ILK_GBL_INT_EN, 0x1f);
			/* Enable bad_pipe, bad_seq, txf_err */
			csr_wr(CVMX_ILK_TXX_INT_EN(interface), 0x7);

			/*
			 * Enable crc24_err, lane_bad_word,
			 * pkt_drop_{rid,rxf,sop}
			 */
			csr_wr(CVMX_ILK_RXX_INT_EN(interface), 0x1e2);
		}
		/* Need to enable ILK interrupts for 78xx */

		for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
			if ((1 << i) & lane_mask) {
				/* Clear pending interrupts before enabling */
				csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
					    0x1ff);
				/* Enable bad_64b67b, bdry_sync_loss, crc32_err,
				 * dskew_fifo_ovfl, scrm_sync_loss,
				 * serdes_lock_loss, stat_msg, ukwn_cntl_word
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
					       0x1ff);
			}
		}

		//debug("ILK%d: Lane alignment complete\n", interface);
	}

	/* Enable error interrupts, now that the link is up */
	cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK,
				node | (interface << 2) | (lane_mask << 4));

	result.s.link_up = 1;
	result.s.full_duplex = 1;
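
	/*
	 * Speed is the per-lane baud rate scaled by the 64b/67b framing
	 * overhead, summed across the active lanes.
	 */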
	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		int qlm = cvmx_qlm_lmac(xiface, 0);

		result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 64 / 67;
	} else {
		result.s.speed =
			cvmx_qlm_get_gbaud_mhz(1 + interface) * 64 / 67;
	}
	result.s.speed *= cvmx_pop(lane_mask);

	return result;

fail:
	if (ilk_rxx_cfg1.s.pkt_ena) {
		/* Disable the interface */
		ilk_rxx_cfg1.s.pkt_ena = 0;
		csr_wr_node(node, CVMX_ILK_RXX_CFG1(interface),
			    ilk_rxx_cfg1.u64);

		/* Disable per-lane error interrupts */
		for (i = 0; i < CVMX_ILK_MAX_LANES(); i++) {
			/* Disable bad_64b67b, bdry_sync_loss, crc32_err,
			 * dskew_fifo_ovfl, scrm_sync_loss, serdes_lock_loss,
			 * stat_msg, ukwn_cntl_word
			 */
			if ((1 << i) & lane_mask) {
				csr_wr_node(node, CVMX_ILK_RX_LNEX_INT(i),
					    0x1ff);
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					csr_wr(CVMX_ILK_RX_LNEX_INT_EN(i),
					       ~0x1ff);
			}
		}
		/* Disable the ILK error interrupt group */
		cvmx_error_enable_group(CVMX_ERROR_GROUP_ILK, 0);
	}

	return result;
}

/**
 * @INTERNAL
 * Set the link state of an IPD/PKO port.
 *
 * @param ipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
	/* Do nothing */
	return 0;
}