// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <dm.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>

#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-mio-defs.h>
#include <mach/cvmx-pciercx-defs.h>
#include <mach/cvmx-pemx-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-rst-defs.h>
#include <mach/cvmx-sata-defs.h>
#include <mach/cvmx-sli-defs.h>
#include <mach/cvmx-sriomaintx-defs.h>
#include <mach/cvmx-sriox-defs.h>

DECLARE_GLOBAL_DATA_PTR;

/** 2.5GHz with 100MHz reference clock */
#define R_2_5G_REFCLK100 0x0
/** 5.0GHz with 100MHz reference clock */
#define R_5G_REFCLK100 0x1
/** 8.0GHz with 100MHz reference clock */
#define R_8G_REFCLK100 0x2
/** 1.25GHz with 156.25MHz reference clock */
#define R_125G_REFCLK15625_KX 0x3
/** 3.125GHz with 156.25MHz reference clock (XAUI) */
#define R_3125G_REFCLK15625_XAUI 0x4
/** 10.3125GHz with 156.25MHz reference clock (XFI/XLAUI) */
#define R_103125G_REFCLK15625_KR 0x5
/** 1.25GHz with 156.25MHz reference clock (SGMII) */
#define R_125G_REFCLK15625_SGMII 0x6
/** 5GHz with 156.25MHz reference clock (QSGMII) */
#define R_5G_REFCLK15625_QSGMII 0x7
/** 6.25GHz with 156.25MHz reference clock (RXAUI/25G) */
#define R_625G_REFCLK15625_RXAUI 0x8
/** 2.5GHz with 125MHz reference clock */
#define R_2_5G_REFCLK125 0x9
/** 5GHz with 125MHz reference clock */
#define R_5G_REFCLK125 0xa
/** 8GHz with 125MHz reference clock */
#define R_8G_REFCLK125 0xb
/** Must be last, number of modes */
#define R_NUM_LANE_MODES 0xc

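/**
 * Check whether a QLM's reference clock runs at a given frequency.
 * The clock measured by cvmx_qlm_measure_clock() is accepted if it
 * falls within +/-10% of the expected rate, which tolerates normal
 * board-level oscillator variation.
 *
 * @param qlm		QLM to check
 * @param reference_mhz	Expected reference clock frequency in MHz
 *
 * Return: 1 if the measured clock matches, 0 if not.
 */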
int cvmx_qlm_is_ref_clock(int qlm, int reference_mhz)
{
	int ref_clock = cvmx_qlm_measure_clock(qlm);
	int mhz = ref_clock / 1000000;
	int range = reference_mhz / 10;

	return ((mhz >= reference_mhz - range) && (mhz <= reference_mhz + range));
}

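/**
 * Translate a baud rate into the MIO_QLMX_CFG[QLM_SPD] encoding for
 * the reference clock actually present on the QLM. The encoding
 * differs per reference clock (100, 125, 156 or 161 MHz), so the
 * same baud rate maps to different QLM_SPD values.
 *
 * @param qlm	QLM to configure
 * @param speed	Desired speed in Mbaud (e.g. 1250, 2500, 3125, 5000)
 *
 * Return: QLM_SPD encoding, or 0xf (disabled) for an unsupported
 *	   speed/reference-clock combination.
 */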
static int __get_qlm_spd(int qlm, int speed)
{
	int qlm_spd = 0xf;

	if (cvmx_qlm_is_ref_clock(qlm, 100)) {
		if (speed == 1250)
			qlm_spd = 0x3;
		else if (speed == 2500)
			qlm_spd = 0x2;
		else if (speed == 5000)
			qlm_spd = 0x0;
		else
			qlm_spd = 0xf;
	} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
		if (speed == 1250)
			qlm_spd = 0xa;
		else if (speed == 2500)
			qlm_spd = 0x9;
		else if (speed == 3125)
			qlm_spd = 0x8;
		else if (speed == 5000)
			qlm_spd = 0x6;
		else if (speed == 6250)
			qlm_spd = 0x5;
		else
			qlm_spd = 0xf;
	} else if (cvmx_qlm_is_ref_clock(qlm, 156)) {
		if (speed == 1250)
			qlm_spd = 0x4;
		else if (speed == 2500)
			qlm_spd = 0x7;
		else if (speed == 3125)
			qlm_spd = 0xe;
		else if (speed == 3750)
			qlm_spd = 0xd;
		else if (speed == 5000)
			qlm_spd = 0xb;
		else if (speed == 6250)
			qlm_spd = 0xc;
		else
			qlm_spd = 0xf;
	} else if (cvmx_qlm_is_ref_clock(qlm, 161)) {
		if (speed == 6316)
			qlm_spd = 0xc;
	}
	return qlm_spd;
}

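/**
 * Put a CN61XX PCIe port into root-complex or endpoint mode by
 * asserting PERST via CIU_SOFT_PRST(1) and programming
 * MIO_RST_CTL(port). In endpoint mode PERST is deasserted again at
 * the end so the remote host can bring the link up.
 *
 * @param pcie_port	PCIe port to configure (0 or 1)
 * @param root_complex	1 for root-complex mode, 0 for endpoint mode
 */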
static void __set_qlm_pcie_mode_61xx(int pcie_port, int root_complex)
{
	int rc = root_complex ? 1 : 0;
	int ep = root_complex ? 0 : 1;
	cvmx_ciu_soft_prst1_t soft_prst1;
	cvmx_ciu_soft_prst_t soft_prst;
	cvmx_mio_rst_ctlx_t rst_ctl;

	if (pcie_port) {
		soft_prst1.u64 = csr_rd(CVMX_CIU_SOFT_PRST1);
		soft_prst1.s.soft_prst = 1;
		csr_wr(CVMX_CIU_SOFT_PRST1, soft_prst1.u64);
	} else {
		soft_prst.u64 = csr_rd(CVMX_CIU_SOFT_PRST);
		soft_prst.s.soft_prst = 1;
		csr_wr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
	}

	rst_ctl.u64 = csr_rd(CVMX_MIO_RST_CTLX(pcie_port));

	rst_ctl.s.prst_link = rc;
	rst_ctl.s.rst_link = ep;
	rst_ctl.s.prtmode = rc;
	rst_ctl.s.rst_drv = rc;
	rst_ctl.s.rst_rcv = 0;
	rst_ctl.s.rst_chip = ep;
	csr_wr(CVMX_MIO_RST_CTLX(pcie_port), rst_ctl.u64);

	if (root_complex == 0) {
		if (pcie_port) {
			soft_prst1.u64 = csr_rd(CVMX_CIU_SOFT_PRST1);
			soft_prst1.s.soft_prst = 0;
			csr_wr(CVMX_CIU_SOFT_PRST1, soft_prst1.u64);
		} else {
			soft_prst.u64 = csr_rd(CVMX_CIU_SOFT_PRST);
			soft_prst.s.soft_prst = 0;
			csr_wr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
		}
	}
}

/**
 * Configure qlm speed and mode. MIO_QLMX_CFG[speed,mode] are not set
 * for CN61XX.
 *
 * @param qlm	The QLM to configure
 * @param speed	The speed the QLM needs to be configured at, in MHz.
 * @param mode	The mode the QLM is to be configured as (SGMII/XAUI/PCIe):
 *		QLM 0: 0 = PCIe0 1X4, 1 = Reserved, 2 = SGMII1, 3 = XAUI1
 *		QLM 1: 0 = PCIe1 1x2, 1 = PCIe(0/1) 2x1, 2 - 3 = Reserved
 *		QLM 2: 0 - 1 = Reserved, 2 = SGMII0, 3 = XAUI0
 * @param rc	Only used for PCIe, rc = 1 for root complex mode, 0 for EP
 *		mode.
 * @param pcie2x1	Only used when QLM1 is in PCIE2x1 mode. QLM_SPD is set
 *			differently depending on how PEMx needs to be
 *			configured:
 *			0x0 - both PEM0 & PEM1 are in gen1 mode.
 *			0x1 - PEM0 in gen2 and PEM1 in gen1 mode.
 *			0x2 - PEM0 in gen1 and PEM1 in gen2 mode.
 *			0x3 - both PEM0 & PEM1 are in gen2 mode.
 *			The SPEED value is ignored in this mode; QLM_SPD is
 *			set based on the pcie2x1 value.
 *
 * Return: 0 on success or -1 on error.
 */
static int octeon_configure_qlm_cn61xx(int qlm, int speed, int mode, int rc, int pcie2x1)
{
	cvmx_mio_qlmx_cfg_t qlm_cfg;

	/* The QLM speed varies for SGMII/XAUI and PCIe mode, and depends
	 * on the reference clock.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
		return -1;

	if (qlm < 3) {
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
	} else {
		debug("WARNING: Invalid QLM(%d) passed\n", qlm);
		return -1;
	}

	switch (qlm) {
	/* SGMII/XAUI mode */
	case 2: {
		if (mode < 2) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}
		qlm_cfg.s.qlm_spd = __get_qlm_spd(qlm, speed);
		qlm_cfg.s.qlm_cfg = mode;
		break;
	}
	case 1: {
		if (mode == 1) { /* 2x1 mode */
			cvmx_mio_qlmx_cfg_t qlm0;

			/* When QLM0 is configured as PCIe(QLM_CFG=0x0)
			 * and enabled (QLM_SPD != 0xf), QLM1 cannot be
			 * configured as PCIe 2x1 mode (QLM_CFG=0x1)
			 * and enabled (QLM_SPD != 0xf).
			 */
			qlm0.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
			if (qlm0.s.qlm_spd != 0xf && qlm0.s.qlm_cfg == 0) {
				debug("Invalid mode(%d) for QLM(%d) as QLM0 is in PCIe mode\n",
				      mode, qlm);
				qlm_cfg.s.qlm_spd = 0xf;
				break;
			}

			/* Set QLM_SPD based on reference clock and mode */
			if (cvmx_qlm_is_ref_clock(qlm, 100)) {
				if (pcie2x1 == 0x3)
					qlm_cfg.s.qlm_spd = 0x0;
				else if (pcie2x1 == 0x1)
					qlm_cfg.s.qlm_spd = 0x2;
				else if (pcie2x1 == 0x2)
					qlm_cfg.s.qlm_spd = 0x1;
				else if (pcie2x1 == 0x0)
					qlm_cfg.s.qlm_spd = 0x3;
				else
					qlm_cfg.s.qlm_spd = 0xf;
			} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
				if (pcie2x1 == 0x3)
					qlm_cfg.s.qlm_spd = 0x4;
				else if (pcie2x1 == 0x1)
					qlm_cfg.s.qlm_spd = 0x6;
				else if (pcie2x1 == 0x2)
					qlm_cfg.s.qlm_spd = 0x9;
				else if (pcie2x1 == 0x0)
					qlm_cfg.s.qlm_spd = 0x7;
				else
					qlm_cfg.s.qlm_spd = 0xf;
			}
			qlm_cfg.s.qlm_cfg = mode;
			csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

			/* Set PCIe mode bits */
			__set_qlm_pcie_mode_61xx(0, rc);
			__set_qlm_pcie_mode_61xx(1, rc);
			return 0;
		} else if (mode > 1) {
			debug("Invalid mode(%d) for QLM(%d).\n", mode, qlm);
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		/* Set speed and mode for PCIe 1x2 mode. */
		if (cvmx_qlm_is_ref_clock(qlm, 100)) {
			if (speed == 5000)
				qlm_cfg.s.qlm_spd = 0x1;
			else if (speed == 2500)
				qlm_cfg.s.qlm_spd = 0x2;
			else
				qlm_cfg.s.qlm_spd = 0xf;
		} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
			if (speed == 5000)
				qlm_cfg.s.qlm_spd = 0x4;
			else if (speed == 2500)
				qlm_cfg.s.qlm_spd = 0x6;
			else
				qlm_cfg.s.qlm_spd = 0xf;
		} else {
			qlm_cfg.s.qlm_spd = 0xf;
		}

		qlm_cfg.s.qlm_cfg = mode;
		csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

		/* Set PCIe mode bits */
		__set_qlm_pcie_mode_61xx(1, rc);
		return 0;
	}
	case 0: {
		/* QLM_CFG = 0x1 - Reserved */
		if (mode == 1) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}
		/* QLM_CFG = 0x0 - PCIe 1x4(PEM0) */
		if (mode == 0 && speed != 5000 && speed != 2500) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		/* Set speed and mode */
		qlm_cfg.s.qlm_spd = __get_qlm_spd(qlm, speed);
		qlm_cfg.s.qlm_cfg = mode;
		csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

		/* Set PCIe mode bits */
		if (mode == 0)
			__set_qlm_pcie_mode_61xx(0, rc);

		return 0;
	}
	default:
		debug("WARNING: Invalid QLM(%d) passed\n", qlm);
		qlm_cfg.s.qlm_spd = 0xf;
	}
	csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);
	return 0;
}

/* qlm		: DLM to configure
 * baud_mhz	: speed of the DLM
 * ref_clk_sel	: reference clock speed selection where:
 *			0: 100MHz
 *			1: 125MHz
 *			2: 156.25MHz
 *
 * ref_clk_input: reference clock input where:
 *			0: DLMC_REF_CLK0_[P,N]
 *			1: DLMC_REF_CLK1_[P,N]
 *			2: DLM0_REF_CLK_[P,N] (only valid for QLM 0)
 * is_sff7000_rxaui : boolean to indicate whether qlm is RXAUI on SFF7000
 */
static int __dlm_setup_pll_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input,
				  int is_sff7000_rxaui)
{
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_dlmx_ref_ssp_en_t dlmx_ref_ssp_en;
	cvmx_gserx_dlmx_mpll_en_t dlmx_mpll_en;
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_tx_amplitude_t tx_amplitude;
	cvmx_gserx_dlmx_tx_preemph_t tx_preemph;
	cvmx_gserx_dlmx_rx_eq_t rx_eq;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	int gmx_ref_clk = 100;

	debug("%s(%d, %d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input,
	      is_sff7000_rxaui);
	if (ref_clk_sel == 1)
		gmx_ref_clk = 125;
	else if (ref_clk_sel == 2)
		gmx_ref_clk = 156;

	if (qlm != 0 && ref_clk_input == 2) {
		printf("%s: Error: can only use reference clock inputs 0 or 1 for DLM %d\n",
		       __func__, qlm);
		return -1;
	}

	/* Hardware defaults are invalid */
	tx_amplitude.u64 = csr_rd(CVMX_GSERX_DLMX_TX_AMPLITUDE(qlm, 0));
	if (is_sff7000_rxaui) {
		tx_amplitude.s.tx0_amplitude = 100;
		tx_amplitude.s.tx1_amplitude = 100;
	} else {
		tx_amplitude.s.tx0_amplitude = 65;
		tx_amplitude.s.tx1_amplitude = 65;
	}

	csr_wr(CVMX_GSERX_DLMX_TX_AMPLITUDE(qlm, 0), tx_amplitude.u64);

	tx_preemph.u64 = csr_rd(CVMX_GSERX_DLMX_TX_PREEMPH(qlm, 0));

	if (is_sff7000_rxaui) {
		tx_preemph.s.tx0_preemph = 0;
		tx_preemph.s.tx1_preemph = 0;
	} else {
		tx_preemph.s.tx0_preemph = 22;
		tx_preemph.s.tx1_preemph = 22;
	}
	csr_wr(CVMX_GSERX_DLMX_TX_PREEMPH(qlm, 0), tx_preemph.u64);

	rx_eq.u64 = csr_rd(CVMX_GSERX_DLMX_RX_EQ(qlm, 0));
	rx_eq.s.rx0_eq = 0;
	rx_eq.s.rx1_eq = 0;
	csr_wr(CVMX_GSERX_DLMX_RX_EQ(qlm, 0), rx_eq.u64);

	/* 1. Write GSER0_DLM0_REF_USE_PAD[REF_USE_PAD] = 1 (to select
	 * reference-clock input)
	 * The documentation for this register in the HRM is useless since
	 * it says it selects between two different clocks that are not
	 * documented anywhere. What it really does is select between
	 * DLM0_REF_CLK_[P,N] if 1 and DLMC_REF_CLK[0,1]_[P,N] if 0.
	 *
	 * This register must be 0 for DLMs 1 and 2 and can only be 1 for
	 * DLM 0.
	 */
	csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(0, 0), ((ref_clk_input == 2) && (qlm == 0)) ? 1 : 0);

	/* Reference clock was already chosen before we got here */

	/* 2. Write GSER0_DLM0_REFCLK_SEL[REFCLK_SEL] if required for
	 * reference-clock selection.
	 *
	 * If GSERX_DLMX_REF_USE_PAD is 1 then this register is ignored.
	 */
	csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(0, 0), ref_clk_input & 1);

	/* Reference clock was already chosen before we got here */

	/* 3. If required, write GSER0_DLM0_REF_CLKDIV2[REF_CLKDIV2] (must be
	 * set if reference clock > 100 MHz)
	 */
	/* Apply workaround for Errata (G-20669) MPLL may not come up. */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	if (gmx_ref_clk == 100)
		ref_clkdiv2.s.ref_clkdiv2 = 0;
	else
		ref_clkdiv2.s.ref_clkdiv2 = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* 1. Ensure GSER(0)_DLM(0..2)_PHY_RESET[PHY_RESET] is set. */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 2. If SGMII or QSGMII or RXAUI (i.e. if DLM0) set
	 * GSER(0)_DLM(0)_MPLL_EN[MPLL_EN] to one.
	 */
	/* 7. Set GSER0_DLM0_MPLL_EN[MPLL_EN] = 1 */
	dlmx_mpll_en.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_EN(0, 0));
	dlmx_mpll_en.s.mpll_en = 1;
	csr_wr(CVMX_GSERX_DLMX_MPLL_EN(0, 0), dlmx_mpll_en.u64);

	/* 3. Set GSER(0)_DLM(0..2)_MPLL_MULTIPLIER[MPLL_MULTIPLIER]
	 * to the value in the preceding table, which is different
	 * than the desired setting prescribed by the HRM.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (gmx_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 35;
	else if (gmx_ref_clk == 125)
		mpll_multiplier.s.mpll_multiplier = 56;
	else
		mpll_multiplier.s.mpll_multiplier = 45;
	debug("%s: Setting mpll multiplier to %u for DLM%d, baud %d, clock rate %uMHz\n",
	      __func__, mpll_multiplier.s.mpll_multiplier, qlm, baud_mhz, gmx_ref_clk);

	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* 5. Clear GSER0_DLM0_TEST_POWERDOWN[TEST_POWERDOWN] */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* 6. Set GSER0_DLM0_REF_SSP_EN[REF_SSP_EN] = 1 */
	dlmx_ref_ssp_en.u64 = csr_rd(CVMX_GSERX_DLMX_REF_SSP_EN(qlm, 0));
	dlmx_ref_ssp_en.s.ref_ssp_en = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_SSP_EN(0, 0), dlmx_ref_ssp_en.u64);

	/* 8. Clear GSER0_DLM0_PHY_RESET[PHY_RESET] = 0 */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 5. If PCIe or SATA (i.e. if DLM1 or DLM2), set both MPLL_EN
	 * and MPLL_EN_OVRD to one in GSER(0)_PHY(1..2)_OVRD_IN_LO.
	 */

	/* 6. Decrease MPLL_MULTIPLIER by one continually until it
	 * reaches the desired long-term setting, ensuring that each
	 * MPLL_MULTIPLIER value is constant for at least 1 msec before
	 * changing to the next value. The desired long-term setting is
	 * as indicated in HRM tables 21-1, 21-2, and 21-3. This is not
	 * required with the HRM sequence.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	__cvmx_qlm_set_mult(qlm, baud_mhz, mpll_multiplier.s.mpll_multiplier);

	/* 9. Poll until the MPLL locks. Wait for
	 * GSER0_DLM0_MPLL_STATUS[MPLL_STATUS] = 1
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_MPLL_STATUS(qlm, 0),
				  cvmx_gserx_dlmx_mpll_status_t, mpll_status, ==, 1, 10000)) {
		printf("PLL for DLM%d failed to lock\n", qlm);
		return -1;
	}
	return 0;
}

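/**
 * Bring up the two CN70XX DLM0 transmit lanes: program the per-lane
 * TX rate to match the requested baud rate, enable the TX drivers,
 * common-mode and data paths for the lanes that GMX0/GMX1 actually
 * use, release TX reset and wait for the enabled lanes to report
 * ready.
 *
 * @param speed		Baud rate in Mbaud (1250, 2500, 3125, 5000 or 6250)
 * @param ref_clk_sel	Reference clock selection (OCTEON_QLM_REF_CLK_*)
 *
 * Return: 0 on success, -1 on an invalid speed/clock combination or
 *	   on timeout.
 */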
static int __dlm0_setup_tx_cn70xx(int speed, int ref_clk_sel)
{
	int need0, need1;
	cvmx_gmxx_inf_mode_t mode0, mode1;
	cvmx_gserx_dlmx_tx_rate_t rate;
	cvmx_gserx_dlmx_tx_en_t en;
	cvmx_gserx_dlmx_tx_cm_en_t cm_en;
	cvmx_gserx_dlmx_tx_data_en_t data_en;
	cvmx_gserx_dlmx_tx_reset_t tx_reset;

	debug("%s(%d, %d)\n", __func__, speed, ref_clk_sel);
	mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
	mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

	/* Which lanes do we need? */
	need0 = (mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED);
	need1 = (mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED) ||
		(mode0.s.mode == CVMX_GMX_INF_MODE_RXAUI);

	/* 1. Write GSER0_DLM0_TX_RATE[TXn_RATE] (Set according to required
	 * data rate (see Table 21-1).
	 */
	rate.u64 = csr_rd(CVMX_GSERX_DLMX_TX_RATE(0, 0));
	debug("%s: speed: %d\n", __func__, speed);
	switch (speed) {
	case 1250:
	case 2500:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ: /* 100MHz */
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.tx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			rate.s.tx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 3125:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.tx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			rate.s.tx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 5000: /* QSGMII only */
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ: /* 100MHz */
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 6250:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	default:
		printf("%s: Invalid rate %d\n", __func__, speed);
		return -1;
	}
	debug("%s: tx 0 rate: %d, tx 1 rate: %d\n", __func__, rate.s.tx0_rate, rate.s.tx1_rate);
	csr_wr(CVMX_GSERX_DLMX_TX_RATE(0, 0), rate.u64);

	/* 2. Set GSER0_DLM0_TX_EN[TXn_EN] = 1 */
	en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_EN(0, 0));
	en.s.tx0_en = need0;
	en.s.tx1_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_EN(0, 0), en.u64);

	/* 3. Set GSER0_DLM0_TX_CM_EN[TXn_CM_EN] = 1 */
	cm_en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_CM_EN(0, 0));
	cm_en.s.tx0_cm_en = need0;
	cm_en.s.tx1_cm_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_CM_EN(0, 0), cm_en.u64);

	/* 4. Set GSER0_DLM0_TX_DATA_EN[TXn_DATA_EN] = 1 */
	data_en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_DATA_EN(0, 0));
	data_en.s.tx0_data_en = need0;
	data_en.s.tx1_data_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_DATA_EN(0, 0), data_en.u64);

	/* 5. Clear GSER0_DLM0_TX_RESET[TXn_RESET] = 0 */
	tx_reset.u64 = csr_rd(CVMX_GSERX_DLMX_TX_RESET(0, 0));
	tx_reset.s.tx0_reset = !need0;
	tx_reset.s.tx1_reset = !need1;
	csr_wr(CVMX_GSERX_DLMX_TX_RESET(0, 0), tx_reset.u64);

	/* 6. Poll GSER0_DLM0_TX_STATUS[TXn_STATUS, TXn_CM_STATUS] until both
	 * are set to 1. This prevents GMX from transmitting until the DLM
	 * is ready.
	 */
	if (need0) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx0_status, ==, 1, 10000)) {
			printf("DLM0 TX0 status fail\n");
			return -1;
		}
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx0_cm_status, ==, 1,
					  10000)) {
			printf("DLM0 TX0 CM status fail\n");
			return -1;
		}
	}
	if (need1) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx1_status, ==, 1, 10000)) {
			printf("DLM0 TX1 status fail\n");
			return -1;
		}
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx1_cm_status, ==, 1,
					  10000)) {
			printf("DLM0 TX1 CM status fail\n");
			return -1;
		}
	}
	return 0;
}

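/**
 * Bring up the two CN70XX DLM0 receive lanes. The RX rate must match
 * the TX rate programmed by __dlm0_setup_tx_cn70xx(); the RX PLLs and
 * data paths are then enabled for the lanes in use and RX reset is
 * released.
 *
 * @param speed		Baud rate in Mbaud (1250, 2500, 3125, 5000 or 6250)
 * @param ref_clk_sel	Reference clock selection (OCTEON_QLM_REF_CLK_*)
 *
 * Return: 0 on success, -1 on an invalid speed/clock combination.
 */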
static int __dlm0_setup_rx_cn70xx(int speed, int ref_clk_sel)
{
	int need0, need1;
	cvmx_gmxx_inf_mode_t mode0, mode1;
	cvmx_gserx_dlmx_rx_rate_t rate;
	cvmx_gserx_dlmx_rx_pll_en_t pll_en;
	cvmx_gserx_dlmx_rx_data_en_t data_en;
	cvmx_gserx_dlmx_rx_reset_t rx_reset;

	debug("%s(%d, %d)\n", __func__, speed, ref_clk_sel);
	mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
	mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

	/* Which lanes do we need? */
	need0 = (mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED);
	need1 = (mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED) ||
		(mode0.s.mode == CVMX_GMX_INF_MODE_RXAUI);

	/* 1. Write GSER0_DLM0_RX_RATE[RXn_RATE] (must match the
	 * GSER0_DLM0_TX_RATE[TXn_RATE] setting).
	 */
	rate.u64 = csr_rd(CVMX_GSERX_DLMX_RX_RATE(0, 0));
	switch (speed) {
	case 1250:
	case 2500:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ: /* 100MHz */
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.rx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			rate.s.rx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 3125:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.rx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			rate.s.rx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 5000: /* QSGMII only */
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ: /* 100MHz */
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.rx0_rate = 0;
			rate.s.rx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 6250:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ: /* 125MHz */
		case OCTEON_QLM_REF_CLK_156MHZ: /* 156.25MHz */
			rate.s.rx0_rate = 0;
			rate.s.rx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	default:
		printf("%s: Invalid rate %d\n", __func__, speed);
		return -1;
	}
	debug("%s: rx 0 rate: %d, rx 1 rate: %d\n", __func__, rate.s.rx0_rate, rate.s.rx1_rate);
	csr_wr(CVMX_GSERX_DLMX_RX_RATE(0, 0), rate.u64);

	/* 2. Set GSER0_DLM0_RX_PLL_EN[RXn_PLL_EN] = 1 */
	pll_en.u64 = csr_rd(CVMX_GSERX_DLMX_RX_PLL_EN(0, 0));
	pll_en.s.rx0_pll_en = need0;
	pll_en.s.rx1_pll_en = need1;
	csr_wr(CVMX_GSERX_DLMX_RX_PLL_EN(0, 0), pll_en.u64);

	/* 3. Set GSER0_DLM0_RX_DATA_EN[RXn_DATA_EN] = 1 */
	data_en.u64 = csr_rd(CVMX_GSERX_DLMX_RX_DATA_EN(0, 0));
	data_en.s.rx0_data_en = need0;
	data_en.s.rx1_data_en = need1;
	csr_wr(CVMX_GSERX_DLMX_RX_DATA_EN(0, 0), data_en.u64);

	/* 4. Clear GSER0_DLM0_RX_RESET[RXn_RESET] = 0. Now the GMX can be
	 * enabled: set GMX(0..1)_INF_MODE[EN] = 1
	 */
	rx_reset.u64 = csr_rd(CVMX_GSERX_DLMX_RX_RESET(0, 0));
	rx_reset.s.rx0_reset = !need0;
	rx_reset.s.rx1_reset = !need1;
	csr_wr(CVMX_GSERX_DLMX_RX_RESET(0, 0), rx_reset.u64);

	return 0;
}

static int a_clk;

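/**
 * Initialize the SATA UCTL bridge on CN70XX DLM2: assert the UAHC
 * and UCTL resets, then derive the AHCI AMBA clock (ACLK) from the
 * bus clock with a divider chosen so it stays at or below 333 MHz,
 * and finally release the ACLK divider reset. The computed rate is
 * kept in a_clk for later use by the UAHC setup.
 *
 * Return: 0 on success, -1 if no usable clock divisor exists.
 */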
static int __dlm2_sata_uctl_init_cn70xx(void)
{
	cvmx_sata_uctl_ctl_t uctl_ctl;
	const int MAX_A_CLK = 333000000; /* Max of 333 MHz */
	int divisor, a_clkdiv;

	/* Wait for all voltages to reach a stable state. Ensure the
	 * reference clock is up and stable.
	 */

	/* 2. Wait for IOI reset to deassert. */

	/* 3. Optionally program the GPIO CSRs for SATA features.
	 *    a. For cold-presence detect:
	 *       i. Select a GPIO for the input and program GPIO_SATA_CTL[sel]
	 *          for port0 and port1.
	 *       ii. Select a GPIO for the output and program
	 *           GPIO_BIT_CFG*[OUTPUT_SEL] for port0 and port1.
	 *    b. For mechanical-presence detect, select a GPIO for the input
	 *       and program GPIO_SATA_CTL[SEL] for port0/port1.
	 *    c. For LED activity, select a GPIO for the output and program
	 *       GPIO_BIT_CFG*[OUTPUT_SEL] for port0/port1.
	 */

	/* 4. Assert all resets:
	 *    a. UAHC reset: SATA_UCTL_CTL[UAHC_RST] = 1
	 *    b. UCTL reset: SATA_UCTL_CTL[UCTL_RST] = 1
	 */

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uahc_rst = 1;
	uctl_ctl.s.sata_uctl_rst = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* 5. Configure the ACLK:
	 *    a. Reset the clock dividers: SATA_UCTL_CTL[A_CLKDIV_RST] = 1.
	 *    b. Select the ACLK frequency (400 MHz maximum)
	 *       i. SATA_UCTL_CTL[A_CLKDIV] = desired value,
	 *       ii. SATA_UCTL_CTL[A_CLKDIV_EN] = 1 to enable the ACLK,
	 *    c. Deassert the ACLK clock divider reset:
	 *       SATA_UCTL_CTL[A_CLKDIV_RST] = 0
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.a_clkdiv_rst = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);

	divisor = (gd->bus_clk + MAX_A_CLK - 1) / MAX_A_CLK;
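	/*
	 * SATA_UCTL_CTL[A_CLKDIV_SEL] encodes a non-linear divider:
	 * values 0-3 divide by 1-4 directly, while 4, 5, 6 and 7 select
	 * fixed dividers of 6, 8, 16 and 24. Round the requested divisor
	 * up to the next supported value so ACLK never exceeds MAX_A_CLK.
	 */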
	if (divisor <= 4) {
		a_clkdiv = divisor - 1;
	} else if (divisor <= 6) {
		a_clkdiv = 4;
		divisor = 6;
	} else if (divisor <= 8) {
		a_clkdiv = 5;
		divisor = 8;
	} else if (divisor <= 16) {
		a_clkdiv = 6;
		divisor = 16;
	} else if (divisor <= 24) {
		a_clkdiv = 7;
		divisor = 24;
	} else {
		printf("Unable to determine SATA clock divisor\n");
		return -1;
	}

	/* Calculate the final clock rate */
	a_clk = gd->bus_clk / divisor;

	uctl_ctl.s.a_clkdiv_sel = a_clkdiv;
	uctl_ctl.s.a_clk_en = 1;
	uctl_ctl.s.a_clk_byp_sel = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.a_clkdiv_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	return 0;
}

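/**
 * Configure the CN70XX DLM2 SerDes for SATA: select the reference
 * clock, apply the G-20669 MPLL workaround, walk the MPLL multiplier
 * down to its long-term value, wait for the PLL and both RX lanes to
 * come ready, then release the UCTL/UAHC resets and program the shim
 * endianness.
 *
 * @param qlm		DLM to configure (DLM2 for SATA on CN70XX)
 * @param baud_mhz	Baud rate for SATA
 * @param ref_clk_sel	Reference clock speed: 0 = 100MHz, 1 = 125MHz,
 *			2 = 156.25MHz
 * @param ref_clk_input	Reference clock input: 0 = DLMC_REF_CLK0,
 *			1 = DLMC_REF_CLK1, 2 = external pad clock
 *
 * Return: 0 on success, -1 on error.
 */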
static int __sata_dlm_init_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_sata_cfg_t sata_cfg;
	cvmx_gserx_sata_lane_rst_t sata_lane_rst;
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_sata_ref_ssp_en_t ref_ssp_en;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	cvmx_gserx_phyx_ovrd_in_lo_t ovrd_in;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	int sata_ref_clk;

	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);

	switch (ref_clk_sel) {
	case 0:
		sata_ref_clk = 100;
		break;
	case 1:
		sata_ref_clk = 125;
		break;
	case 2:
		sata_ref_clk = 156;
		break;
	default:
		printf("%s: Invalid reference clock select %d for qlm %d\n", __func__,
		       ref_clk_sel, qlm);
		return -1;
	}

	/* 5. Set GSERX0_SATA_CFG[SATA_EN] = 1 to configure DLM2 multiplexing.
	 */
	sata_cfg.u64 = csr_rd(CVMX_GSERX_SATA_CFG(0));
	sata_cfg.s.sata_en = 1;
	csr_wr(CVMX_GSERX_SATA_CFG(0), sata_cfg.u64);

	/* 1. Write GSER(0)_DLM2_REFCLK_SEL[REFCLK_SEL] if required for
	 * reference-clock selection.
	 */
	if (ref_clk_input < 2) {
		csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(qlm, 0), ref_clk_input);
		csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(qlm, 0), 0);
	} else {
		csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(qlm, 0), 1);
	}

	ref_ssp_en.u64 = csr_rd(CVMX_GSERX_SATA_REF_SSP_EN(0));
	ref_ssp_en.s.ref_ssp_en = 1;
	csr_wr(CVMX_GSERX_SATA_REF_SSP_EN(0), ref_ssp_en.u64);

	/* Apply workaround for Errata (G-20669) MPLL may not come up. */

	/* Set REF_CLKDIV2 based on the Ref Clock */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	if (sata_ref_clk == 100)
		ref_clkdiv2.s.ref_clkdiv2 = 0;
	else
		ref_clkdiv2.s.ref_clkdiv2 = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* 1. Ensure GSER(0)_DLM(0..2)_PHY_RESET[PHY_RESET] is set. */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 2. If SGMII or QSGMII or RXAUI (i.e. if DLM0) set
	 * GSER(0)_DLM(0)_MPLL_EN[MPLL_EN] to one.
	 */

	/* 3. Set GSER(0)_DLM(0..2)_MPLL_MULTIPLIER[MPLL_MULTIPLIER]
	 * to the value in the preceding table, which is different
	 * than the desired setting prescribed by the HRM.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (sata_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 35;
	else
		mpll_multiplier.s.mpll_multiplier = 56;
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* 3. Clear GSER0_DLM2_TEST_POWERDOWN[TEST_POWERDOWN] = 0 */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* 4. Clear either/both lane0 and lane1 resets:
	 * GSER0_SATA_LANE_RST[L0_RST, L1_RST] = 0.
	 */
	sata_lane_rst.u64 = csr_rd(CVMX_GSERX_SATA_LANE_RST(0));
	sata_lane_rst.s.l0_rst = 0;
	sata_lane_rst.s.l1_rst = 0;
	csr_wr(CVMX_GSERX_SATA_LANE_RST(0), sata_lane_rst.u64);

	udelay(1);

	/* 5. Clear GSER0_DLM2_PHY_RESET */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* 6. If PCIe or SATA (i.e. if DLM1 or DLM2), set both MPLL_EN
	 * and MPLL_EN_OVRD to one in GSER(0)_PHY(1..2)_OVRD_IN_LO.
	 */
	ovrd_in.u64 = csr_rd(CVMX_GSERX_PHYX_OVRD_IN_LO(qlm, 0));
	ovrd_in.s.mpll_en = 1;
	ovrd_in.s.mpll_en_ovrd = 1;
	csr_wr(CVMX_GSERX_PHYX_OVRD_IN_LO(qlm, 0), ovrd_in.u64);

	/* 7. Decrease MPLL_MULTIPLIER by one continually until it reaches
	 * the desired long-term setting, ensuring that each MPLL_MULTIPLIER
	 * value is constant for at least 1 msec before changing to the next
	 * value. The desired long-term setting is as indicated in HRM tables
	 * 21-1, 21-2, and 21-3. This is not required with the HRM
	 * sequence.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (sata_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 0x1e;
	else
		mpll_multiplier.s.mpll_multiplier = 0x30;
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_MPLL_STATUS(qlm, 0),
				  cvmx_gserx_dlmx_mpll_status_t, mpll_status, ==, 1, 10000)) {
		printf("ERROR: SATA MPLL failed to set\n");
		return -1;
	}

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_RX_STATUS(qlm, 0), cvmx_gserx_dlmx_rx_status_t,
				  rx0_status, ==, 1, 10000)) {
		printf("ERROR: SATA RX0_STATUS failed to set\n");
		return -1;
	}
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_RX_STATUS(qlm, 0), cvmx_gserx_dlmx_rx_status_t,
				  rx1_status, ==, 1, 10000)) {
		printf("ERROR: SATA RX1_STATUS failed to set\n");
		return -1;
	}

	/* 8. Deassert UCTL and UAHC resets:
	 *    a. SATA_UCTL_CTL[UCTL_RST] = 0
	 *    b. SATA_UCTL_CTL[UAHC_RST] = 0
	 *    c. Wait 10 ACLK cycles before accessing any ACLK-only registers.
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uctl_rst = 0;
	uctl_ctl.s.sata_uahc_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	/* 9. Enable conditional SCLK of UCTL by writing
	 * SATA_UCTL_CTL[CSCLK_EN] = 1
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.csclk_en = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* 10. Initialize UAHC as described in the AHCI Specification
	 * (UAHC_* registers).
	 */

	/* set-up endian mode */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	shim_cfg.s.csr_endian_mode = 3;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}

/**
 * Initializes DLM 4 for SATA
 *
 * @param qlm		Must be 4.
 * @param baud_mhz	Baud rate for SATA
 * @param ref_clk_sel	Selects the speed of the reference clock where:
 *			0 = 100MHz, 1 = 125MHz and 2 = 156.25MHz
 * @param ref_clk_input	Reference clock input where 0 = external QLM clock,
 *			1 = qlmc_ref_clk0 and 2 = qlmc_ref_clk1
 *
 * Return: 0 on success, -1 on error.
 */
static int __sata_dlm_init_cn73xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_rx_pwr_ctrl_p2_t pwr_ctrl_p2;
	cvmx_gserx_lanex_misc_cfg_0_t misc_cfg_0;
	cvmx_gserx_sata_lane_rst_t lane_rst;
	cvmx_gserx_pll_px_mode_0_t pmode_0;
	cvmx_gserx_pll_px_mode_1_t pmode_1;
	cvmx_gserx_lane_px_mode_0_t lane_pmode_0;
	cvmx_gserx_lane_px_mode_1_t lane_pmode_1;
	cvmx_gserx_cfg_t gserx_cfg;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	int l;
	int i;

	/*
	 * 1. Configure the SATA
	 */

	/*
	 * 2. Configure the QLM Reference clock
	 *    Set GSERX_REFCLK_SEL.COM_CLK_SEL to source the reference clock
	 *    from the external clock mux.
	 *    Set GSERX_REFCLK_SEL.USE_COM1 to select qlmc_refclkn/p_1 or
	 *    leave clear to select qlmc_refclkn/p_0.
	 */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) { /* Common reference clock 0 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else { /* Common reference clock 1 */
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	if (ref_clk_sel != 0) {
		printf("Wrong reference clock selected for QLM4\n");
		return -1;
	}

	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1);

	/*
	 * 3. Configure the QLM for SATA mode: set GSERX_CFG.SATA
	 */
	gserx_cfg.u64 = 0;
	gserx_cfg.s.sata = 1;
	csr_wr(CVMX_GSERX_CFG(qlm), gserx_cfg.u64);

	/*
	 * 12. Clear the appropriate lane resets:
	 *     clear GSERX_SATA_LANE_RST.LX_RST where X is the lane number 0-1.
	 */
	lane_rst.u64 = csr_rd(CVMX_GSERX_SATA_LANE_RST(qlm));
	lane_rst.s.l0_rst = 0;
	lane_rst.s.l1_rst = 0;
	csr_wr(CVMX_GSERX_SATA_LANE_RST(qlm), lane_rst.u64);
	csr_rd(CVMX_GSERX_SATA_LANE_RST(qlm));

	udelay(1);

	/*
	 * 4. Take the PHY out of reset
	 *    Write GSERX_PHY_CTL.PHY_RESET to a zero
	 */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				  rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Workaround for errata GSER-30310: SATA HDD Not Ready due to
	 * PHY SDLL/LDLL lockup at 3GHz
	 */
	for (i = 0; i < 2; i++) {
		cvmx_gserx_slicex_pcie1_mode_t pcie1;
		cvmx_gserx_slicex_pcie2_mode_t pcie2;
		cvmx_gserx_slicex_pcie3_mode_t pcie3;

		pcie1.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE1_MODE(i, qlm));
		pcie1.s.rx_pi_bwsel = 1;
		pcie1.s.rx_ldll_bwsel = 1;
		pcie1.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE1_MODE(i, qlm), pcie1.u64);

		pcie2.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE2_MODE(i, qlm));
		pcie2.s.rx_pi_bwsel = 1;
		pcie2.s.rx_ldll_bwsel = 1;
		pcie2.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE2_MODE(i, qlm), pcie2.u64);

		pcie3.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE3_MODE(i, qlm));
		pcie3.s.rx_pi_bwsel = 1;
		pcie3.s.rx_ldll_bwsel = 1;
		pcie3.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE3_MODE(i, qlm), pcie3.u64);
	}

	/*
	 * 7. Change P2 termination
	 *    Clear GSERX_RX_PWR_CTRL_P2.P2_RX_SUBBLK_PD[0] (Termination)
	 */
	pwr_ctrl_p2.u64 = csr_rd(CVMX_GSERX_RX_PWR_CTRL_P2(qlm));
	pwr_ctrl_p2.s.p2_rx_subblk_pd &= 0x1e;
	csr_wr(CVMX_GSERX_RX_PWR_CTRL_P2(qlm), pwr_ctrl_p2.u64);

	/*
	 * 8. Modify the Electrical IDLE Detect on delay
	 *    Change GSERX_LANE(0..3)_MISC_CFG_0.EIE_DET_STL_ON_TIME to a 0x4
	 */
	for (i = 0; i < 2; i++) {
		misc_cfg_0.u64 = csr_rd(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm));
		misc_cfg_0.s.eie_det_stl_on_time = 4;
		csr_wr(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm), misc_cfg_0.u64);
	}

	/*
	 * 9. Modify the PLL and Lane Protocol Mode registers to configure
	 *    the PHY for SATA (configure all three PLLs, regardless of the
	 *    speed actually in use).
	 */

	/* Errata (GSER-26724) SATA never indicates GSER QLM_STAT[RST_RDY].
	 * We program PLL_PX_MODE_0 last due to this errata.
	 */
	for (l = 0; l < 3; l++) {
		pmode_1.u64 = csr_rd(CVMX_GSERX_PLL_PX_MODE_1(l, qlm));
		lane_pmode_0.u64 = csr_rd(CVMX_GSERX_LANE_PX_MODE_0(l, qlm));
		lane_pmode_1.u64 = csr_rd(CVMX_GSERX_LANE_PX_MODE_1(l, qlm));

		pmode_1.s.pll_cpadj = 0x2;
		pmode_1.s.pll_opr = 0x0;
		pmode_1.s.pll_div = 0x1e;
		pmode_1.s.pll_pcie3en = 0x0;
		pmode_1.s.pll_16p5en = 0x0;

		lane_pmode_0.s.ctle = 0x0;
		lane_pmode_0.s.pcie = 0x0;
		lane_pmode_0.s.tx_ldiv = 0x0;
		lane_pmode_0.s.srate = 0;
		lane_pmode_0.s.tx_mode = 0x3;
		lane_pmode_0.s.rx_mode = 0x3;

		lane_pmode_1.s.vma_mm = 1;
		lane_pmode_1.s.vma_fine_cfg_sel = 0;
		lane_pmode_1.s.cdr_fgain = 0xa;
		lane_pmode_1.s.ph_acc_adj = 0x15;

		if (l == R_2_5G_REFCLK100)
			lane_pmode_0.s.rx_ldiv = 0x2;
		else if (l == R_5G_REFCLK100)
			lane_pmode_0.s.rx_ldiv = 0x1;
		else
			lane_pmode_0.s.rx_ldiv = 0x0;

		csr_wr(CVMX_GSERX_PLL_PX_MODE_1(l, qlm), pmode_1.u64);
		csr_wr(CVMX_GSERX_LANE_PX_MODE_0(l, qlm), lane_pmode_0.u64);
		csr_wr(CVMX_GSERX_LANE_PX_MODE_1(l, qlm), lane_pmode_1.u64);
	}

	for (l = 0; l < 3; l++) {
		pmode_0.u64 = csr_rd(CVMX_GSERX_PLL_PX_MODE_0(l, qlm));
		pmode_0.s.pll_icp = 0x1;
		pmode_0.s.pll_rloop = 0x3;
		pmode_0.s.pll_pcs_div = 0x5;
		csr_wr(CVMX_GSERX_PLL_PX_MODE_0(l, qlm), pmode_0.u64);
	}

	for (i = 0; i < 2; i++) {
		cvmx_gserx_slicex_rx_sdll_ctrl_t rx_sdll;

		rx_sdll.u64 = csr_rd(CVMX_GSERX_SLICEX_RX_SDLL_CTRL(i, qlm));
		rx_sdll.s.pcs_sds_oob_clk_ctrl = 2;
		rx_sdll.s.pcs_sds_rx_sdll_tune = 0;
		rx_sdll.s.pcs_sds_rx_sdll_swsel = 0;
		csr_wr(CVMX_GSERX_SLICEX_RX_SDLL_CTRL(i, qlm), rx_sdll.u64);
	}

	for (i = 0; i < 2; i++) {
		cvmx_gserx_lanex_misc_cfg_0_t misc_cfg;

		misc_cfg.u64 = csr_rd(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm));
		misc_cfg.s.use_pma_polarity = 0;
		misc_cfg.s.cfg_pcs_loopback = 0;
		misc_cfg.s.pcs_tx_mode_ovrrd_en = 0;
		misc_cfg.s.pcs_rx_mode_ovrrd_en = 0;
		misc_cfg.s.cfg_eie_det_cnt = 0;
		misc_cfg.s.eie_det_stl_on_time = 4;
		misc_cfg.s.eie_det_stl_off_time = 0;
		misc_cfg.s.tx_bit_order = 1;
		misc_cfg.s.rx_bit_order = 1;
		csr_wr(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm), misc_cfg.u64);
	}

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe
	 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				  rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Poll GSERX_SATA_STATUS for P0_RDY = 1 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_SATA_STATUS(qlm), cvmx_gserx_sata_status_t,
				  p0_rdy, ==, 1, 10000)) {
		printf("QLM4: Timeout waiting for GSERX_SATA_STATUS[p0_rdy]\n");
		return -1;
	}

	/* Poll GSERX_SATA_STATUS for P1_RDY = 1 */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_SATA_STATUS(qlm), cvmx_gserx_sata_status_t,
				  p1_rdy, ==, 1, 10000)) {
		printf("QLM4: Timeout waiting for GSERX_SATA_STATUS[p1_rdy]\n");
		return -1;
	}

	udelay(2000);

	/* 6. Deassert UCTL and UAHC resets:
	 *    a. SATA_UCTL_CTL[UCTL_RST] = 0
	 *    b. SATA_UCTL_CTL[UAHC_RST] = 0
	 *    c. Wait 10 ACLK cycles before accessing any ACLK-only registers.
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uctl_rst = 0;
	uctl_ctl.s.sata_uahc_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	/* 7. Enable conditional SCLK of UCTL by writing
	 * SATA_UCTL_CTL[CSCLK_EN] = 1
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.csclk_en = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* set-up endian mode */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	shim_cfg.s.csr_endian_mode = 3;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}

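/**
 * Initialize the SATA UAHC (AHCI host controller) on CN70XX: program
 * the 1ms timer from the ACLK rate, advertise staggered spin-up and
 * mechanical presence switch support, limit the link speed according
 * to baud_mhz, then bring up each of the two ports and wait for a
 * device to become ready, retrying with an interface reset (COMRESET
 * via PxSCTL[DET]) up to three times per port.
 *
 * @param baud_mhz	Maximum link rate: 1500, 3000, or anything else
 *			for 6000 Mbaud
 *
 * Return: 0; each port's availability is reported on the console.
 */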
static int __dlm2_sata_uahc_init_cn70xx(int baud_mhz)
{
	cvmx_sata_uahc_gbl_cap_t gbl_cap;
	cvmx_sata_uahc_px_sctl_t sctl;
	cvmx_sata_uahc_gbl_pi_t pi;
	cvmx_sata_uahc_px_cmd_t cmd;
	cvmx_sata_uahc_px_sctl_t sctl0, sctl1;
	cvmx_sata_uahc_px_ssts_t ssts;
	cvmx_sata_uahc_px_tfd_t tfd;
	cvmx_sata_uahc_gbl_timer1ms_t gbl_timer1ms;
	u64 done;
	int result = -1;
	int retry_count = 0;
	int spd;

	/* From the Synopsys data book, SATA_UAHC_GBL_TIMER1MS is the
	 * AMBA clock in MHz * 1000, which is a_clk(Hz) / 1000
	 */
	gbl_timer1ms.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_TIMER1MS);
	gbl_timer1ms.s.timv = a_clk / 1000;
	csr_wr32(CVMX_SATA_UAHC_GBL_TIMER1MS, gbl_timer1ms.u32);
	gbl_timer1ms.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_TIMER1MS);

	/* Set up the global capabilities reg (GBL_CAP) */
	gbl_cap.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_CAP);
	debug("%s: SATA_UAHC_GBL_CAP before: 0x%x\n", __func__, gbl_cap.u32);
	gbl_cap.s.sss = 1;
	gbl_cap.s.smps = 1;
	csr_wr32(CVMX_SATA_UAHC_GBL_CAP, gbl_cap.u32);
	gbl_cap.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_CAP);
	debug("%s: SATA_UAHC_GBL_CAP after: 0x%x\n", __func__, gbl_cap.u32);

	/* Set up the global hba control reg (interrupt enables) */
	/* Set up the port SATA control registers (speed limitation) */
	if (baud_mhz == 1500)
		spd = 1;
	else if (baud_mhz == 3000)
		spd = 2;
	else
		spd = 3;

	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	debug("%s: SATA_UAHC_P0_SCTL before: 0x%x\n", __func__, sctl.u32);
	sctl.s.spd = spd;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	debug("%s: SATA_UAHC_P0_SCTL after: 0x%x\n", __func__, sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	debug("%s: SATA_UAHC_P1_SCTL before: 0x%x\n", __func__, sctl.u32);
	sctl.s.spd = spd;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	debug("%s: SATA_UAHC_P1_SCTL after: 0x%x\n", __func__, sctl.u32);

	/* Set up the ports-implemented reg. */
	pi.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_PI);
	debug("%s: SATA_UAHC_GBL_PI before: 0x%x\n", __func__, pi.u32);
	pi.s.pi = 3;
	csr_wr32(CVMX_SATA_UAHC_GBL_PI, pi.u32);
	pi.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_PI);
	debug("%s: SATA_UAHC_GBL_PI after: 0x%x\n", __func__, pi.u32);

retry0:
	/* Clear port SERR and IS registers */
	csr_wr32(CVMX_SATA_UAHC_PX_SERR(0), csr_rd32(CVMX_SATA_UAHC_PX_SERR(0)));
	csr_wr32(CVMX_SATA_UAHC_PX_IS(0), csr_rd32(CVMX_SATA_UAHC_PX_IS(0)));

	/* Set spin-up, power on, FIS RX enable, start, active */
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(0));
	debug("%s: SATA_UAHC_P0_CMD before: 0x%x\n", __func__, cmd.u32);
	cmd.s.fre = 1;
	cmd.s.sud = 1;
	cmd.s.pod = 1;
	cmd.s.st = 1;
	cmd.s.icc = 1;
	cmd.s.fbscp = 1; /* Enable FIS-based switching */
	csr_wr32(CVMX_SATA_UAHC_PX_CMD(0), cmd.u32);
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(0));
	debug("%s: SATA_UAHC_P0_CMD after: 0x%x\n", __func__, cmd.u32);

	sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	sctl0.s.det = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);

	/* check status */
	done = get_timer(0);
	while (1) {
		ssts.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SSTS(0));

		if (ssts.s.ipm == 1 && ssts.s.det == 3) {
			result = 0;
			break;
		} else if (get_timer(done) > 100) {
			result = -1;
			break;
		}

		udelay(100);
	}

	if (result != -1) {
		/* Clear the PxSERR register, by writing '1s' to each
		 * implemented bit location
		 */
		csr_wr32(CVMX_SATA_UAHC_PX_SERR(0), -1);

		/*
		 * Wait for an indication that the SATA drive is ready. This
		 * is determined via an examination of PxTFD.STS. If
		 * PxTFD.STS.BSY, PxTFD.STS.DRQ, and PxTFD.STS.ERR are all
		 * '0', prior to the maximum allowed time as specified in the
		 * ATA/ATAPI-7 specification, the device is ready.
		 */
		/*
		 * Wait for the device to be ready. BSY(7), DRQ(3), and ERR(0)
		 * must be clear
		 */
		done = get_timer(0);
		while (1) {
			tfd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_TFD(0));
			if ((tfd.s.sts & 0x89) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 500) {
				if (retry_count < 3) {
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
					sctl0.s.det = 1; /* Perform interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);
					udelay(1000); /* 1ms dictated by AHCI 1.3 spec */
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
					sctl0.s.det = 0; /* Complete interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);
					retry_count++;
					goto retry0;
				}
				result = -1;
				break;
			}

			udelay(100);
		}
	}

	if (result == -1)
		printf("SATA0: not available\n");
	else
		printf("SATA0: available\n");

	sctl1.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	sctl1.s.det = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl1.u32);

	result = -1;
	retry_count = 0;

retry1:
	/* Clear port SERR and IS registers */
	csr_wr32(CVMX_SATA_UAHC_PX_SERR(1), csr_rd32(CVMX_SATA_UAHC_PX_SERR(1)));
	csr_wr32(CVMX_SATA_UAHC_PX_IS(1), csr_rd32(CVMX_SATA_UAHC_PX_IS(1)));

	/* Set spin-up, power on, FIS RX enable, start, active */
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(1));
	debug("%s: SATA_UAHC_P1_CMD before: 0x%x\n", __func__, cmd.u32);
	cmd.s.fre = 1;
	cmd.s.sud = 1;
	cmd.s.pod = 1;
	cmd.s.st = 1;
	cmd.s.icc = 1;
	cmd.s.fbscp = 1; /* Enable FIS-based switching */
	csr_wr32(CVMX_SATA_UAHC_PX_CMD(1), cmd.u32);
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(1));
	debug("%s: SATA_UAHC_P1_CMD after: 0x%x\n", __func__, cmd.u32);

	/* check status */
	done = get_timer(0);
	while (1) {
		ssts.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SSTS(1));

		if (ssts.s.ipm == 1 && ssts.s.det == 3) {
			result = 0;
			break;
		} else if (get_timer(done) > 1000) {
			result = -1;
			break;
		}

		udelay(100);
	}

	if (result != -1) {
		/* Clear the PxSERR register, by writing '1s' to each
		 * implemented bit location
		 */
		csr_wr32(CVMX_SATA_UAHC_PX_SERR(1), csr_rd32(CVMX_SATA_UAHC_PX_SERR(1)));

		/*
		 * Wait for an indication that the SATA drive is ready. This
		 * is determined via an examination of PxTFD.STS. If
		 * PxTFD.STS.BSY, PxTFD.STS.DRQ, and PxTFD.STS.ERR are all
		 * '0', prior to the maximum allowed time as specified in the
		 * ATA/ATAPI-7 specification, the device is ready.
		 */
		/*
		 * Wait for the device to be ready. BSY(7), DRQ(3), and ERR(0)
		 * must be clear
		 */
		done = get_timer(0);
		while (1) {
			tfd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_TFD(1));
			if ((tfd.s.sts & 0x89) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 500) {
				if (retry_count < 3) {
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
					sctl0.s.det = 1; /* Perform interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl0.u32);
					udelay(1000); /* 1ms dictated by AHCI 1.3 spec */
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
					sctl0.s.det = 0; /* Complete interface reset */
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl0.u32);
					retry_count++;
					goto retry1;
				}
				result = -1;
				break;
			}

			udelay(100);
		}
	}

	if (result == -1)
		printf("SATA1: not available\n");
	else
		printf("SATA1: available\n");

	return 0;
}

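/**
 * Run the SATA built-in self-test and bring up the interface: the
 * UCTL bridge and the SerDes (CN70XX or CN73XX variant) are
 * initialized first, then BIST is started and its completion polled
 * for up to one second, and finally the UAHC is configured. The shim
 * CSR endianness is switched at the end so the generic AHCI driver
 * can access the controller.
 *
 * Return: 0 on success, -1 on BIST or initialization failure.
 */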
static int __sata_bist_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_sata_uctl_bist_status_t bist_status;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	u64 done;
	int result = -1;

	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	bist_status.u64 = csr_rd(CVMX_SATA_UCTL_BIST_STATUS);

	if (__dlm2_sata_uctl_init_cn70xx()) {
		printf("ERROR: Failed to initialize SATA UCTL CSRs\n");
		return -1;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		result = __sata_dlm_init_cn73xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	else
		result = __sata_dlm_init_cn70xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	if (result) {
		printf("ERROR: Failed to initialize SATA GSER CSRs\n");
		return -1;
	}

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.start_bist = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* Set up for a 1 sec timer. */
	done = get_timer(0);
	while (1) {
		bist_status.u64 = csr_rd(CVMX_SATA_UCTL_BIST_STATUS);
		if ((bist_status.s.uctl_xm_r_bist_ndone |
		     bist_status.s.uctl_xm_w_bist_ndone |
		     bist_status.s.uahc_p0_rxram_bist_ndone |
		     bist_status.s.uahc_p1_rxram_bist_ndone |
		     bist_status.s.uahc_p0_txram_bist_ndone |
		     bist_status.s.uahc_p1_txram_bist_ndone) == 0) {
			result = 0;
			break;
		} else if (get_timer(done) > 1000) {
			result = -1;
			break;
		}

		udelay(100);
	}
	if (result == -1) {
		printf("ERROR: SATA_UCTL_BIST_STATUS = 0x%llx\n",
		       (unsigned long long)bist_status.u64);
		return -1;
	}

	debug("%s: Initializing UAHC\n", __func__);
	if (__dlm2_sata_uahc_init_cn70xx(baud_mhz)) {
		printf("ERROR: Failed to initialize SATA UAHC CSRs\n");
		return -1;
	}

	/* Change CSR_ENDIAN_MODE to big endian to use Open Source AHCI SATA
	 * driver
	 */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.csr_endian_mode = 1;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}

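/**
 * Thin wrapper that configures a DLM/QLM for SATA by running the
 * BIST/bring-up sequence.
 *
 * @param qlm		QLM/DLM to configure
 * @param baud_mhz	Baud rate for SATA
 * @param ref_clk_sel	Reference clock speed selection (0/1/2)
 * @param ref_clk_input	Reference clock input (0/1/2)
 *
 * Return: 0 on success, -1 on error.
 */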
static int __setup_sata(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	return __sata_bist_cn70xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
}

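/**
 * Configure a CN70XX DLM for PCIe. Only root-complex mode is set up
 * here; in endpoint mode the remote host performs the configuration,
 * so the function returns early. The PIPE multiplexing, reference
 * clock, MPLL multiplier and PEM mode registers are programmed
 * according to the requested lane configuration.
 *
 * @param qlm		DLM to configure
 * @param mode		PCIe lane configuration (CVMX_QLM_MODE_PCIE*)
 * @param gen2		1 to configure the link for gen2, 0 for gen1
 * @param rc		1 for root-complex mode, 0 for endpoint mode
 * @param ref_clk_sel	Reference clock: 0 = 100MHz, 1 = 125MHz
 * @param ref_clk_input	Reference clock input (0 or 1 for DLM1/DLM2)
 *
 * Return: 0 on success, -1 on error.
 */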
1594static int __dlmx_setup_pcie_cn70xx(int qlm, enum cvmx_qlm_mode mode, int gen2, int rc,
1595 int ref_clk_sel, int ref_clk_input)
1596{
1597 cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
1598 cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
1599 cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
1600 cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
1601 static const u8 ref_clk_mult[2] = { 35, 56 }; /* 100 & 125 MHz ref clock supported. */
1602
1603 debug("%s(%d, %d, %d, %d, %d, %d)\n", __func__, qlm, mode, gen2, rc, ref_clk_sel,
1604 ref_clk_input);
1605 if (rc == 0) {
1606 debug("Skipping initializing PCIe dlm %d in endpoint mode\n", qlm);
1607 return 0;
1608 }
1609
1610 if (qlm > 0 && ref_clk_input > 1) {
1611 printf("%s: Error: ref_clk_input can only be 0 or 1 for QLM %d\n",
1612 __func__, qlm);
1613 return -1;
1614 }
1615
1616 if (ref_clk_sel > OCTEON_QLM_REF_CLK_125MHZ) {
1617 printf("%s: Error: ref_clk_sel can only be 100 or 125 MHZ.\n", __func__);
1618 return -1;
1619 }
1620
1621 /* 1. Write GSER0_DLM(1..2)_REFCLK_SEL[REFCLK_SEL] if required for
1622 * reference-clock selection
1623 */
1624
1625 csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(qlm, 0), ref_clk_input);
1626
1627 /* 2. If required, write GSER0_DLM(1..2)_REF_CLKDIV2[REF_CLKDIV2] = 1
1628 * (must be set if reference clock >= 100 MHz)
1629 */
1630
1631 /* 4. Configure the PCIE PIPE:
1632 * a. Write GSER0_PCIE_PIPE_PORT_SEL[PIPE_PORT_SEL] to configure the
1633 * PCIE PIPE.
1634 * 0x0 = disables all pipes
1635 * 0x1 = enables pipe0 only (PEM0 4-lane)
1636 * 0x2 = enables pipes 0 and 1 (PEM0 and PEM1 2-lanes each)
1637 * 0x3 = enables pipes 0, 1, 2, and 3 (PEM0, PEM1, and PEM3 are
1638 * one-lane each)
1639 * b. Configure GSER0_PCIE_PIPE_PORT_SEL[CFG_PEM1_DLM2]. If PEM1 is
1640 * to be configured, this bit must reflect which DLM it is logically
1641 * tied to. This bit sets multiplexing logic in GSER, and it is used
1642 * by the RST logic to determine when the MAC can come out of reset.
1643 * 0 = PEM1 is tied to DLM1 (for 3 x 1 PCIe mode).
1644 * 1 = PEM1 is tied to DLM2 (for all other PCIe modes).
1645 */
1646 if (qlm == 1) {
1647 cvmx_gserx_pcie_pipe_port_sel_t pipe_port;
1648
1649 pipe_port.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_PORT_SEL(0));
1650 pipe_port.s.cfg_pem1_dlm2 = (mode == CVMX_QLM_MODE_PCIE_1X1) ? 1 : 0;
1651 pipe_port.s.pipe_port_sel =
1652 (mode == CVMX_QLM_MODE_PCIE) ? 1 : /* PEM0 only */
1653 (mode == CVMX_QLM_MODE_PCIE_1X2) ? 2 : /* PEM0-1 */
1654 (mode == CVMX_QLM_MODE_PCIE_1X1) ? 3 : /* PEM0-2 */
1655 (mode == CVMX_QLM_MODE_PCIE_2X1) ? 3 : /* PEM0-1 */
1656 0; /* PCIe disabled */
1657 csr_wr(CVMX_GSERX_PCIE_PIPE_PORT_SEL(0), pipe_port.u64);
1658 }
1659
1660 /* Apply workaround for Errata (G-20669) MPLL may not come up. */
1661
1662 /* Set REF_CLKDIV2 based on the Ref Clock */
1663 ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
1664 ref_clkdiv2.s.ref_clkdiv2 = ref_clk_sel > 0;
1665 csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);
1666
1667 /* 1. Ensure GSER(0)_DLM(0..2)_PHY_RESET[PHY_RESET] is set. */
1668 dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
1669 dlmx_phy_reset.s.phy_reset = 1;
1670 csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);
1671
1672 /* 2. If SGMII or QSGMII or RXAUI (i.e. if DLM0) set
1673 * GSER(0)_DLM(0)_MPLL_EN[MPLL_EN] to one.
1674 */
1675
1676 /* 3. Set GSER(0)_DLM(0..2)_MPLL_MULTIPLIER[MPLL_MULTIPLIER]
1677 * to the value in the preceding table, which is different
1678 * than the desired setting prescribed by the HRM.
1679 */
1680 mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
1681 mpll_multiplier.s.mpll_multiplier = ref_clk_mult[ref_clk_sel];
1682 debug("%s: Setting MPLL multiplier to %d\n", __func__,
1683 (int)mpll_multiplier.s.mpll_multiplier);
1684 csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);
1685 /* 5. Clear GSER0_DLM(1..2)_TEST_POWERDOWN. Configurations that only
1686 * use DLM1 need not clear GSER0_DLM2_TEST_POWERDOWN
1687 */
1688 dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
1689 dlmx_test_powerdown.s.test_powerdown = 0;
1690 csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);
1691
1692 /* 6. Clear GSER0_DLM(1..2)_PHY_RESET. Configurations that use only
1693 * need DLM1 need not clear GSER0_DLM2_PHY_RESET
1694 */
1695 dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
1696 dlmx_phy_reset.s.phy_reset = 0;
1697 csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);
1698
1699 /* 6. Decrease MPLL_MULTIPLIER by one continually until it reaches
1700 * the desired long-term setting, ensuring that each MPLL_MULTIPLIER
1701 * value is constant for at least 1 msec before changing to the next
1702 * value. The desired long-term setting is as indicated in HRM tables
1703 * 21-1, 21-2, and 21-3. This is not required with the HRM
1704	 * 21-1, 21-2, and 21-3. This stepping is not part of the HRM
1705 */
1706 /* This is set when initializing PCIe after soft reset is asserted. */
1707
1708 /* 7. Write the GSER0_PCIE_PIPE_RST register to take the appropriate
1709 * PIPE out of reset. There is a PIPEn_RST bit for each PIPE. Clear
1710 * the appropriate bits based on the configuration (reset is
1711 * active high).
1712 */
1713 if (qlm == 1) {
1714 cvmx_pemx_cfg_t pemx_cfg;
1715 cvmx_pemx_on_t pemx_on;
1716 cvmx_gserx_pcie_pipe_rst_t pipe_rst;
1717 cvmx_rst_ctlx_t rst_ctl;
1718
1719 switch (mode) {
1720 case CVMX_QLM_MODE_PCIE: /* PEM0 on DLM1 & DLM2 */
1721 case CVMX_QLM_MODE_PCIE_1X2: /* PEM0 on DLM1 */
1722 case CVMX_QLM_MODE_PCIE_1X1: /* PEM0 on DLM1 using lane 0 */
1723 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
1724 pemx_cfg.cn70xx.hostmd = rc;
1725 if (mode == CVMX_QLM_MODE_PCIE_1X1) {
1726 pemx_cfg.cn70xx.md =
1727 gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
1728 } else if (mode == CVMX_QLM_MODE_PCIE) {
1729 pemx_cfg.cn70xx.md =
1730 gen2 ? CVMX_PEM_MD_GEN2_4LANE : CVMX_PEM_MD_GEN1_4LANE;
1731 } else {
1732 pemx_cfg.cn70xx.md =
1733 gen2 ? CVMX_PEM_MD_GEN2_2LANE : CVMX_PEM_MD_GEN1_2LANE;
1734 }
1735 csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
1736
1737 rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(0));
1738 rst_ctl.s.rst_drv = 1;
1739 csr_wr(CVMX_RST_CTLX(0), rst_ctl.u64);
1740
1741 /* PEM0 is on DLM1&2 which is pipe0 */
1742 pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
1743 pipe_rst.s.pipe0_rst = 0;
1744 csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
1745
1746 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
1747 pemx_on.s.pemon = 1;
1748 csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
1749 break;
1750 case CVMX_QLM_MODE_PCIE_2X1: /* PEM0 and PEM1 on DLM1 */
1751 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
1752 pemx_cfg.cn70xx.hostmd = rc;
1753 pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
1754 csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
1755
1756 rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(0));
1757 rst_ctl.s.rst_drv = 1;
1758 csr_wr(CVMX_RST_CTLX(0), rst_ctl.u64);
1759
1760 /* PEM0 is on DLM1 which is pipe0 */
1761 pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
1762 pipe_rst.s.pipe0_rst = 0;
1763 csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
1764
1765 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
1766 pemx_on.s.pemon = 1;
1767 csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
1768
1769 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
1770 pemx_cfg.cn70xx.hostmd = 1;
1771 pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
1772 csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
1773 rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
1774 rst_ctl.s.rst_drv = 1;
1775 csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);
1776 /* PEM1 is on DLM2 which is pipe1 */
1777 pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
1778 pipe_rst.s.pipe1_rst = 0;
1779 csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
1780 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
1781 pemx_on.s.pemon = 1;
1782 csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
1783 break;
1784 default:
1785 break;
1786 }
1787 } else {
1788 cvmx_pemx_cfg_t pemx_cfg;
1789 cvmx_pemx_on_t pemx_on;
1790 cvmx_gserx_pcie_pipe_rst_t pipe_rst;
1791 cvmx_rst_ctlx_t rst_ctl;
1792
1793 switch (mode) {
1794 case CVMX_QLM_MODE_PCIE_1X2: /* PEM1 on DLM2 */
1795 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
1796 pemx_cfg.cn70xx.hostmd = 1;
1797 pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_2LANE : CVMX_PEM_MD_GEN1_2LANE;
1798 csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
1799
1800 rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
1801 rst_ctl.s.rst_drv = 1;
1802 csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);
1803
1804 /* PEM1 is on DLM1 lane 0, which is pipe1 */
1805 pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
1806 pipe_rst.s.pipe1_rst = 0;
1807 csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
1808
1809 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
1810 pemx_on.s.pemon = 1;
1811 csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
1812 break;
1813 case CVMX_QLM_MODE_PCIE_2X1: /* PEM1 and PEM2 on DLM2 */
1814 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
1815 pemx_cfg.cn70xx.hostmd = 1;
1816 pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
1817 csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
1818
1819 rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
1820 rst_ctl.s.rst_drv = 1;
1821 csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);
1822
1823 /* PEM1 is on DLM2 lane 0, which is pipe2 */
1824 pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
1825 pipe_rst.s.pipe2_rst = 0;
1826 csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
1827
1828 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
1829 pemx_on.s.pemon = 1;
1830 csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
1831
1832 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
1833 pemx_cfg.cn70xx.hostmd = 1;
1834 pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
1835 csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);
1836
1837 rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(2));
1838 rst_ctl.s.rst_drv = 1;
1839 csr_wr(CVMX_RST_CTLX(2), rst_ctl.u64);
1840
1841 /* PEM2 is on DLM2 lane 1, which is pipe3 */
1842 pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
1843 pipe_rst.s.pipe3_rst = 0;
1844 csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
1845
1846 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
1847 pemx_on.s.pemon = 1;
1848 csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
1849 break;
1850 default:
1851 break;
1852 }
1853 }
1854 return 0;
1855}
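/*
 * Illustrative usage sketch (not part of the original source): this helper
 * is driven per-DLM from octeon_configure_qlm_cn70xx() below. The gen2/rc
 * values here are assumptions for a gen2 root-complex bring-up:
 *
 *	// PEM0 x4 spans DLM1 and DLM2; both DLMs get the same mode
 *	__dlmx_setup_pcie_cn70xx(1, CVMX_QLM_MODE_PCIE, 1, 1, ref_clk_sel, ref_clk_input);
 *	__dlmx_setup_pcie_cn70xx(2, CVMX_QLM_MODE_PCIE, 1, 1, ref_clk_sel, ref_clk_input);
 *
 *	// PEM0 and PEM1 as one-lane ports on DLM1 only
 *	__dlmx_setup_pcie_cn70xx(1, CVMX_QLM_MODE_PCIE_2X1, 1, 1, ref_clk_sel, ref_clk_input);
 */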
1856
1857/**
1858 * Configure dlm speed and mode for cn70xx.
1859 *
1860 * @param qlm The DLM to configure
1861 * @param speed	The speed the DLM needs to be configured at, in MHz.
1862 * @param mode	The mode the DLM is to be configured in (SGMII/XAUI/PCIe).
1863 * DLM 0: has 2 interfaces which can be configured as
1864 *		SGMII/QSGMII/RXAUI. Both need to be configured at the
1865 *		same time. These are the valid options:
1866 * CVMX_QLM_MODE_QSGMII,
1867 * CVMX_QLM_MODE_SGMII_SGMII,
1868 * CVMX_QLM_MODE_SGMII_DISABLED,
1869 * CVMX_QLM_MODE_DISABLED_SGMII,
1870 * CVMX_QLM_MODE_SGMII_QSGMII,
1871 * CVMX_QLM_MODE_QSGMII_QSGMII,
1872 * CVMX_QLM_MODE_QSGMII_DISABLED,
1873 * CVMX_QLM_MODE_DISABLED_QSGMII,
1874 * CVMX_QLM_MODE_QSGMII_SGMII,
1875 * CVMX_QLM_MODE_RXAUI_1X2
1876 *
1877 *	DLM 1: PEM0/1 in PCIE_1x4/PCIE_1x2/PCIE_2x1/PCIE_1x1
1878 * DLM 2: PEM0/1/2 in PCIE_1x4/PCIE_1x2/PCIE_2x1/PCIE_1x1
1879 * @param rc Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
1880 * @param gen2	Only used for PCIe; gen2 = 1 for GEN2 mode, else GEN1 mode.
1881 *
1882 * @param ref_clk_input The reference-clock input to use to configure QLM
1883 * @param ref_clk_sel The reference-clock selection to use to configure QLM
1884 *
1885 * Return: 0 on success or -1.
1886 */
1887static int octeon_configure_qlm_cn70xx(int qlm, int speed, int mode, int rc, int gen2,
1888 int ref_clk_sel, int ref_clk_input)
1889{
1890 debug("%s(%d, %d, %d, %d, %d, %d, %d)\n", __func__, qlm, speed, mode, rc, gen2, ref_clk_sel,
1891 ref_clk_input);
1892 switch (qlm) {
1893 case 0: {
1894 int is_sff7000_rxaui = 0;
1895 cvmx_gmxx_inf_mode_t inf_mode0, inf_mode1;
1896
1897 inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
1898 inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));
1899 if (inf_mode0.s.en || inf_mode1.s.en) {
1900 debug("DLM0 already configured\n");
1901 return -1;
1902 }
1903
1904 switch (mode) {
1905 case CVMX_QLM_MODE_SGMII_SGMII:
1906 debug(" Mode SGMII SGMII\n");
1907 inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
1908 inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
1909 break;
1910 case CVMX_QLM_MODE_SGMII_QSGMII:
1911 debug(" Mode SGMII QSGMII\n");
1912 inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
1913 inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1914 break;
1915 case CVMX_QLM_MODE_SGMII_DISABLED:
1916 debug(" Mode SGMII Disabled\n");
1917 inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
1918 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1919 break;
1920 case CVMX_QLM_MODE_DISABLED_SGMII:
1921 debug("Mode Disabled SGMII\n");
1922 inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1923 inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
1924 break;
1925 case CVMX_QLM_MODE_QSGMII_SGMII:
1926 debug(" Mode QSGMII SGMII\n");
1927 inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1928 inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
1929 break;
1930 case CVMX_QLM_MODE_QSGMII_QSGMII:
1931 debug(" Mode QSGMII QSGMII\n");
1932 inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1933 inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1934 break;
1935 case CVMX_QLM_MODE_QSGMII_DISABLED:
1936 debug(" Mode QSGMII Disabled\n");
1937 inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1938 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1939 break;
1940 case CVMX_QLM_MODE_DISABLED_QSGMII:
1941 debug("Mode Disabled QSGMII\n");
1942 inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1943 inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1944 break;
1945 case CVMX_QLM_MODE_RXAUI:
1946 debug(" Mode RXAUI\n");
1947 inf_mode0.s.mode = CVMX_GMX_INF_MODE_RXAUI;
1948 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1949
1950 break;
1951 default:
1952 debug(" Mode Disabled Disabled\n");
1953 inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1954 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1955 break;
1956 }
1957 csr_wr(CVMX_GMXX_INF_MODE(0), inf_mode0.u64);
1958 csr_wr(CVMX_GMXX_INF_MODE(1), inf_mode1.u64);
1959
1960 /* Bringup the PLL */
1961 if (__dlm_setup_pll_cn70xx(qlm, speed, ref_clk_sel, ref_clk_input,
1962 is_sff7000_rxaui))
1963 return -1;
1964
1965 /* TX Lanes */
1966 if (__dlm0_setup_tx_cn70xx(speed, ref_clk_sel))
1967 return -1;
1968
1969 /* RX Lanes */
1970 if (__dlm0_setup_rx_cn70xx(speed, ref_clk_sel))
1971 return -1;
1972
1973 /* Enable the interface */
1974 inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
1975 if (inf_mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED)
1976 inf_mode0.s.en = 1;
1977 csr_wr(CVMX_GMXX_INF_MODE(0), inf_mode0.u64);
1978 inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));
1979 if (inf_mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED)
1980 inf_mode1.s.en = 1;
1981 csr_wr(CVMX_GMXX_INF_MODE(1), inf_mode1.u64);
1982 break;
1983 }
1984 case 1:
1985 switch (mode) {
1986 case CVMX_QLM_MODE_PCIE: /* PEM0 on DLM1 & DLM2 */
1987 debug(" Mode PCIe\n");
1988 if (__dlmx_setup_pcie_cn70xx(1, mode, gen2, rc, ref_clk_sel, ref_clk_input))
1989 return -1;
1990 if (__dlmx_setup_pcie_cn70xx(2, mode, gen2, rc, ref_clk_sel, ref_clk_input))
1991 return -1;
1992 break;
1993 case CVMX_QLM_MODE_PCIE_1X2: /* PEM0 on DLM1 */
1994 case CVMX_QLM_MODE_PCIE_2X1: /* PEM0 & PEM1 on DLM1 */
1995 case CVMX_QLM_MODE_PCIE_1X1: /* PEM0 on DLM1, only 1 lane */
1996 debug(" Mode PCIe 1x2, 2x1 or 1x1\n");
1997 if (__dlmx_setup_pcie_cn70xx(qlm, mode, gen2, rc, ref_clk_sel,
1998 ref_clk_input))
1999 return -1;
2000 break;
2001 case CVMX_QLM_MODE_DISABLED:
2002 debug(" Mode disabled\n");
2003 break;
2004 default:
2005 debug("DLM1 illegal mode specified\n");
2006 return -1;
2007 }
2008 break;
2009 case 2:
2010 switch (mode) {
2011 case CVMX_QLM_MODE_SATA_2X1:
2012 debug("%s: qlm 2, mode is SATA 2x1\n", __func__);
2013 /* DLM2 is SATA, PCIE2 is disabled */
2014 if (__setup_sata(qlm, speed, ref_clk_sel, ref_clk_input))
2015 return -1;
2016 break;
2017 case CVMX_QLM_MODE_PCIE:
2018 debug(" Mode PCIe\n");
2019 /* DLM2 is PCIE0, PCIE1-2 are disabled. */
2020		/* Do nothing; it's initialized in DLM1 */
2021 break;
2022 case CVMX_QLM_MODE_PCIE_1X2: /* PEM1 on DLM2 */
2023 case CVMX_QLM_MODE_PCIE_2X1: /* PEM1 & PEM2 on DLM2 */
2024 debug(" Mode PCIe 1x2 or 2x1\n");
2025 if (__dlmx_setup_pcie_cn70xx(qlm, mode, gen2, rc, ref_clk_sel,
2026 ref_clk_input))
2027 return -1;
2028 break;
2029 case CVMX_QLM_MODE_DISABLED:
2030 debug(" Mode Disabled\n");
2031 break;
2032 default:
2033 debug("DLM2 illegal mode specified\n");
2034 return -1;
2035		}
		break;
2036	default:
2037 return -1;
2038 }
2039
2040 return 0;
2041}
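/*
 * Usage sketch with assumed board values (a minimal example, not taken from
 * the original source): configure DLM1 as a 4-lane gen2 PCIe root complex
 * with reference-clock selector 0 (100 MHz) on clock input 0. The speed
 * argument is not consulted on the PCIe paths above; 5000 is a placeholder:
 *
 *	if (octeon_configure_qlm_cn70xx(1, 5000, CVMX_QLM_MODE_PCIE,
 *					1, 1, 0, 0))
 *		printf("DLM1 PCIe bringup failed\n");
 */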
2042
2043/**
2044 * Disables DFE for the specified QLM lane(s).
2045 * This function should only be called for low-loss channels.
2046 *
2047 * @param node Node to configure
2048 * @param qlm QLM to configure
2049 * @param lane Lane to configure, or -1 all lanes
2050 * @param baud_mhz The speed the QLM needs to be configured at, in MHz.
2051 * @param mode     The mode the QLM is to be configured in (SGMII/XAUI/PCIe).
2052 */
2053void octeon_qlm_dfe_disable(int node, int qlm, int lane, int baud_mhz, int mode)
2054{
2055 int num_lanes = cvmx_qlm_get_lanes(qlm);
2056 int l;
2057 cvmx_gserx_lanex_rx_loop_ctrl_t loop_ctrl;
2058 cvmx_gserx_lanex_rx_valbbd_ctrl_0_t ctrl_0;
2059 cvmx_gserx_lanex_rx_valbbd_ctrl_1_t ctrl_1;
2060 cvmx_gserx_lanex_rx_valbbd_ctrl_2_t ctrl_2;
2061 cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
2062
2063 /* Interfaces below 5Gbaud are already manually tuned. */
2064 if (baud_mhz < 5000)
2065 return;
2066
2067 /* Don't run on PCIe links, SATA or KR. These interfaces use training */
2068 switch (mode) {
2069 case CVMX_QLM_MODE_10G_KR_1X2:
2070 case CVMX_QLM_MODE_10G_KR:
2071 case CVMX_QLM_MODE_40G_KR4:
2072 return;
2073 case CVMX_QLM_MODE_PCIE_1X1:
2074 case CVMX_QLM_MODE_PCIE_2X1:
2075 case CVMX_QLM_MODE_PCIE_1X2:
2076 case CVMX_QLM_MODE_PCIE:
2077 case CVMX_QLM_MODE_PCIE_1X8:
2078 return;
2079 case CVMX_QLM_MODE_SATA_2X1:
2080 return;
2081 default:
2082 break;
2083 }
2084
2085 /* Updating pre_ctle minimum to 0. This works best for short channels */
2086 lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
2087 lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = 0;
2088 csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);
2089
2090 for (l = 0; l < num_lanes; l++) {
2091 if (lane != -1 && lane != l)
2092 continue;
2093
2094 /* 1. Write GSERX_LANEx_RX_LOOP_CTRL = 0x0270
2095		 * (var "loop_ctrl" with bit 1 cleared).
2096 * bit<1> dfe_en_byp = 1'b0
2097 */
2098 loop_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm));
2099 loop_ctrl.s.cfg_rx_lctrl = loop_ctrl.s.cfg_rx_lctrl & 0x3fd;
2100 csr_wr_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm), loop_ctrl.u64);
2101
2102 /* 2. Write GSERX_LANEx_RX_VALBBD_CTRL_1 = 0x0000
2103 * (var "ctrl1" with all bits cleared)
2104 * bits<14:11> CFG_RX_DFE_C3_MVAL = 4'b0000
2105 * bit<10> CFG_RX_DFE_C3_MSGN = 1'b0
2106 * bits<9:6> CFG_RX_DFE_C2_MVAL = 4'b0000
2107 * bit<5> CFG_RX_DFE_C2_MSGN = 1'b0
2108 * bits<4:0> CFG_RX_DFE_C1_MVAL = 5'b00000
2109 */
2110 ctrl_1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_1(l, qlm));
2111 ctrl_1.s.dfe_c3_mval = 0;
2112 ctrl_1.s.dfe_c3_msgn = 0;
2113 ctrl_1.s.dfe_c2_mval = 0;
2114 ctrl_1.s.dfe_c2_msgn = 0;
2116 ctrl_1.s.dfe_c1_mval = 0;
2117 ctrl_1.s.dfe_c1_msgn = 0;
2118 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_1(l, qlm), ctrl_1.u64);
2119
2120 /* 3. Write GSERX_LANEx_RX_VALBBD_CTRL_0 = 0x2400
2121 * (var "ctrl0" with following bits set/cleared)
2122 * bits<11:10> CFG_RX_DFE_GAIN = 0x1
2123 * bits<9:6> CFG_RX_DFE_C5_MVAL = 4'b0000
2124 * bit<5> CFG_RX_DFE_C5_MSGN = 1'b0
2125 * bits<4:1> CFG_RX_DFE_C4_MVAL = 4'b0000
2126 * bit<0> CFG_RX_DFE_C4_MSGN = 1'b0
2127 */
2128 ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm));
2129 ctrl_0.s.dfe_gain = 0x1;
2130 ctrl_0.s.dfe_c5_mval = 0;
2131 ctrl_0.s.dfe_c5_msgn = 0;
2132 ctrl_0.s.dfe_c4_mval = 0;
2133 ctrl_0.s.dfe_c4_msgn = 0;
2134 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm), ctrl_0.u64);
2135
2136 /* 4. Write GSER(0..13)_LANE(0..3)_RX_VALBBD_CTRL_2 = 0x003F
2137 * //enable DFE tap overrides
2138 * bit<5> dfe_ovrd_en = 1
2139 * bit<4> dfe_c5_ovrd_val = 1
2140 * bit<3> dfe_c4_ovrd_val = 1
2141 * bit<2> dfe_c3_ovrd_val = 1
2142 * bit<1> dfe_c2_ovrd_val = 1
2143 * bit<0> dfe_c1_ovrd_val = 1
2144 */
2145 ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_2(l, qlm));
2146 ctrl_2.s.dfe_ovrd_en = 0x1;
2147 ctrl_2.s.dfe_c5_ovrd_val = 0x1;
2148 ctrl_2.s.dfe_c4_ovrd_val = 0x1;
2149 ctrl_2.s.dfe_c3_ovrd_val = 0x1;
2150 ctrl_2.s.dfe_c2_ovrd_val = 0x1;
2151 ctrl_2.s.dfe_c1_ovrd_val = 0x1;
2152 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_2(l, qlm), ctrl_2.u64);
2153 }
2154}
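/*
 * Usage sketch (values are illustrative assumptions): disable DFE on all
 * lanes of a 6.25 Gbaud RXAUI QLM on node 0. Lane -1 selects every lane,
 * and the KR/PCIe/SATA early-outs above keep trained links untouched:
 *
 *	octeon_qlm_dfe_disable(0, 4, -1, 6250, CVMX_QLM_MODE_RXAUI);
 */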
2155
2156/**
2157 * Disables DFE, uses fixed CTLE Peak value and AGC settings
2158 * for the specified QLM lane(s).
2159 * This function should only be called for low-loss channels.
2160 * This function prevents Rx equalization from happening on all lanes in a QLM
2161 * This function should be called for all lanes being used in the QLM.
2162 *
2163 * @param node Node to configure
2164 * @param qlm QLM to configure
2165 * @param lane Lane to configure, or -1 all lanes
2166 * @param baud_mhz The speed the QLM needs to be configured at, in MHz.
2167 * @param mode     The mode the QLM is to be configured in (SGMII/XAUI/PCIe).
2168 * @param ctle_zero Equalizer Peaking control
2169 * @param agc_pre_ctle Pre-CTLE gain
2170 * @param agc_post_ctle Post-CTLE gain
2171 * Return: Zero on success, negative on failure
2172 */
2174int octeon_qlm_dfe_disable_ctle_agc(int node, int qlm, int lane, int baud_mhz, int mode,
2175 int ctle_zero, int agc_pre_ctle, int agc_post_ctle)
2176{
2177 int num_lanes = cvmx_qlm_get_lanes(qlm);
2178 int l;
2179 cvmx_gserx_lanex_rx_loop_ctrl_t loop_ctrl;
2180 cvmx_gserx_lanex_rx_valbbd_ctrl_0_t ctrl_0;
2181 cvmx_gserx_lanex_pwr_ctrl_t lanex_pwr_ctrl;
2182 cvmx_gserx_lane_mode_t lmode;
2183 cvmx_gserx_lane_px_mode_1_t px_mode_1;
2184 cvmx_gserx_lanex_rx_cfg_5_t rx_cfg_5;
2185 cvmx_gserx_lanex_rx_cfg_2_t rx_cfg_2;
2186 cvmx_gserx_lanex_rx_ctle_ctrl_t ctle_ctrl;
2187
2188 /* Check tuning constraints */
2189 if (ctle_zero < 0 || ctle_zero > 15) {
2190		printf("Error: N%d.QLM%d: Invalid CTLE_ZERO(%d). Must be between 0 and 15.\n",
2191 node, qlm, ctle_zero);
2192 return -1;
2193 }
2194 if (agc_pre_ctle < 0 || agc_pre_ctle > 15) {
2195 printf("Error: N%d.QLM%d: Invalid AGC_Pre_CTLE(%d)\n",
2196 node, qlm, agc_pre_ctle);
2197 return -1;
2198 }
2199
2200 if (agc_post_ctle < 0 || agc_post_ctle > 15) {
2201 printf("Error: N%d.QLM%d: Invalid AGC_Post_CTLE(%d)\n",
2202 node, qlm, agc_post_ctle);
2203 return -1;
2204 }
2205
2206 /* Interfaces below 5Gbaud are already manually tuned. */
2207 if (baud_mhz < 5000)
2208 return 0;
2209
2210 /* Don't run on PCIe links, SATA or KR. These interfaces use training */
2211 switch (mode) {
2212 case CVMX_QLM_MODE_10G_KR_1X2:
2213 case CVMX_QLM_MODE_10G_KR:
2214 case CVMX_QLM_MODE_40G_KR4:
2215 return 0;
2216 case CVMX_QLM_MODE_PCIE_1X1:
2217 case CVMX_QLM_MODE_PCIE_2X1:
2218 case CVMX_QLM_MODE_PCIE_1X2:
2219 case CVMX_QLM_MODE_PCIE:
2220 case CVMX_QLM_MODE_PCIE_1X8:
2221 return 0;
2222 case CVMX_QLM_MODE_SATA_2X1:
2223 return 0;
2224 default:
2225 break;
2226 }
2227
2228 lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
2229
2230 /* 1. Enable VMA manual mode for the QLM's lane mode */
2231 px_mode_1.u64 = csr_rd_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm));
2232 px_mode_1.s.vma_mm = 1;
2233 csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm), px_mode_1.u64);
2234
2235 /* 2. Disable DFE */
2236 octeon_qlm_dfe_disable(node, qlm, lane, baud_mhz, mode);
2237
2238 for (l = 0; l < num_lanes; l++) {
2239 if (lane != -1 && lane != l)
2240 continue;
2241
2242 /* 3. Write GSERX_LANEx_RX_VALBBD_CTRL_0.CFG_RX_AGC_GAIN = 0x2 */
2243 ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm));
2244 ctrl_0.s.agc_gain = 0x2;
2245 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm), ctrl_0.u64);
2246
2247 /* 4. Write GSERX_LANEx_RX_LOOP_CTRL
2248 * bit<8> lctrl_men = 1'b1
2249 * bit<0> cdr_en_byp = 1'b1
2250 */
2251 loop_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm));
2252 loop_ctrl.s.cfg_rx_lctrl = loop_ctrl.s.cfg_rx_lctrl | 0x101;
2253 csr_wr_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm), loop_ctrl.u64);
2254
2255 /* 5. Write GSERX_LANEx_PWR_CTRL = 0x0040 (var "lanex_pwr_ctrl" with
2256 * following bits set)
2257 * bit<6> RX_LCTRL_OVRRD_EN = 1'b1
2258 * all other bits cleared.
2259 */
2260 lanex_pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(l, qlm));
2261 lanex_pwr_ctrl.s.rx_lctrl_ovrrd_en = 1;
2262 csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(l, qlm), lanex_pwr_ctrl.u64);
2263
2264 /* --Setting AGC in manual mode and configuring CTLE-- */
2265 rx_cfg_5.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_5(l, qlm));
2266 rx_cfg_5.s.rx_agc_men_ovrrd_val = 1;
2267 rx_cfg_5.s.rx_agc_men_ovrrd_en = 1;
2268 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_5(l, qlm), rx_cfg_5.u64);
2269
2270 ctle_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(l, qlm));
2271 ctle_ctrl.s.pcs_sds_rx_ctle_zero = ctle_zero;
2272 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(l, qlm), ctle_ctrl.u64);
2273
2274 rx_cfg_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_2(l, qlm));
2275 rx_cfg_2.s.rx_sds_rx_agc_mval = (agc_pre_ctle << 4) | agc_post_ctle;
2276 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_2(l, qlm), rx_cfg_2.u64);
2277 }
2278 return 0;
2279}
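/*
 * A hedged example (values assumed, not recommendations): pin the Rx
 * equalizer on a short, low-loss RXAUI channel with CTLE zero 2, pre-CTLE
 * gain 0 and post-CTLE gain 10 on all lanes of QLM 4 on node 0:
 *
 *	if (octeon_qlm_dfe_disable_ctle_agc(0, 4, -1, 6250,
 *					    CVMX_QLM_MODE_RXAUI, 2, 0, 10))
 *		printf("manual Rx EQ setup failed\n");
 */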
2280
2281/**
2282 * Some QLM speeds need to override the default tuning parameters
2283 *
2284 * @param node Node to configure
2285 * @param qlm QLM to configure
2286 * @param baud_mhz Desired speed in MHz
2287 * @param lane	Lane to apply the tuning parameters to
2288 * @param tx_swing	Voltage swing. The higher the value, the lower the voltage;
2289 * the default value is 7.
2290 * @param tx_pre pre-cursor pre-emphasis
2291 * @param tx_post post-cursor pre-emphasis.
2292 * @param tx_gain Transmit gain. Range 0-7
2293 * @param tx_vboost Transmit voltage boost. Range 0-1
2294 */
2295void octeon_qlm_tune_per_lane_v3(int node, int qlm, int baud_mhz, int lane, int tx_swing,
2296 int tx_pre, int tx_post, int tx_gain, int tx_vboost)
2297{
2298 cvmx_gserx_cfg_t gserx_cfg;
2299 cvmx_gserx_lanex_tx_cfg_0_t tx_cfg0;
2300 cvmx_gserx_lanex_tx_pre_emphasis_t pre_emphasis;
2301 cvmx_gserx_lanex_tx_cfg_1_t tx_cfg1;
2302 cvmx_gserx_lanex_tx_cfg_3_t tx_cfg3;
2303 cvmx_bgxx_spux_br_pmd_control_t pmd_control;
2304 cvmx_gserx_lanex_pcs_ctlifc_0_t pcs_ctlifc_0;
2305 cvmx_gserx_lanex_pcs_ctlifc_2_t pcs_ctlifc_2;
2306 int bgx, lmac;
2307
2308 /* Do not apply QLM tuning to PCIe and KR interfaces. */
2309 gserx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
2310 if (gserx_cfg.s.pcie)
2311 return;
2312
2313 /* Apply the QLM tuning only to cn73xx and cn78xx models only */
2314 if (OCTEON_IS_MODEL(OCTEON_CN78XX))
2315 bgx = (qlm < 2) ? qlm : (qlm - 2);
2316 else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
2317 bgx = (qlm < 4) ? (qlm - 2) : 2;
2318 else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
2319 bgx = 0;
2320 else
2321 return;
2322
2323 if ((OCTEON_IS_MODEL(OCTEON_CN73XX) && qlm == 6) ||
2324 (OCTEON_IS_MODEL(OCTEON_CNF75XX) && qlm == 5))
2325 lmac = 2;
2326 else
2327 lmac = lane;
2328
2329 /* No need to tune 10G-KR and 40G-KR interfaces */
2330 pmd_control.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(lmac, bgx));
2331 if (pmd_control.s.train_en)
2332 return;
2333
2334 if (tx_pre != -1 && tx_post == -1)
2335 tx_post = 0;
2336
2337 if (tx_post != -1 && tx_pre == -1)
2338 tx_pre = 0;
2339
2340 /* Check tuning constraints */
2341 if (tx_swing < -1 || tx_swing > 25) {
2342 printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_SWING(%d). TX_SWING must be <= 25.\n",
2343 node, qlm, lane, tx_swing);
2344 return;
2345 }
2346
2347 if (tx_pre < -1 || tx_pre > 10) {
2348 printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_PRE(%d). TX_PRE must be <= 10.\n",
2349		       node, qlm, lane, tx_pre);
2350 return;
2351 }
2352
2353 if (tx_post < -1 || tx_post > 31) {
2354		printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_POST(%d). TX_POST must be <= 31.\n",
2355		       node, qlm, lane, tx_post);
2356 return;
2357 }
2358
2359 if (tx_pre >= 0 && tx_post >= 0 && tx_swing >= 0 &&
2360 tx_pre + tx_post - tx_swing > 2) {
2361 printf("ERROR: N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) - TX_SWING(%d) must be <= 2\n",
2362 node, qlm, lane, tx_pre, tx_post, tx_swing);
2363 return;
2364 }
2365
2366 if (tx_pre >= 0 && tx_post >= 0 && tx_swing >= 0 &&
2367 tx_pre + tx_post + tx_swing > 35) {
2368 printf("ERROR: N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) + TX_SWING(%d) must be <= 35\n",
2369 node, qlm, lane, tx_pre, tx_post, tx_swing);
2370 return;
2371 }
2372
2373 if (tx_gain < -1 || tx_gain > 7) {
2374 printf("ERROR: N%d.QLM%d: Lane %d: Invalid TX_GAIN(%d). TX_GAIN must be between 0 and 7\n",
2375 node, qlm, lane, tx_gain);
2376 return;
2377 }
2378
2379 if (tx_vboost < -1 || tx_vboost > 1) {
2380 printf("ERROR: N%d.QLM%d: Lane %d: Invalid TX_VBOOST(%d). TX_VBOOST must be 0 or 1.\n",
2381 node, qlm, lane, tx_vboost);
2382 return;
2383 }
2384
2385 debug("N%d.QLM%d: Lane %d: TX_SWING=%d, TX_PRE=%d, TX_POST=%d, TX_GAIN=%d, TX_VBOOST=%d\n",
2386 node, qlm, lane, tx_swing, tx_pre, tx_post, tx_gain, tx_vboost);
2387
2388	/* Complete the Tx swing and Tx equalization programming */
2389 /* 1) Enable Tx swing and Tx emphasis overrides */
2390 tx_cfg1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_1(lane, qlm));
2391 tx_cfg1.s.tx_swing_ovrrd_en = (tx_swing != -1);
2392 tx_cfg1.s.tx_premptap_ovrrd_val = (tx_pre != -1) && (tx_post != -1);
2393	tx_cfg1.s.tx_vboost_en_ovrrd_en = (tx_vboost != -1); /* Vboost override */
2395 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_1(lane, qlm), tx_cfg1.u64);
2396 /* 2) Program the Tx swing and Tx emphasis Pre-cursor and Post-cursor values */
2397 /* CFG_TX_PREMPTAP[8:4] = Lane X's TX post-cursor value (C+1) */
2398 /* CFG_TX_PREMPTAP[3:0] = Lane X's TX pre-cursor value (C-1) */
2399 if (tx_swing != -1) {
2400 tx_cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(lane, qlm));
2401 tx_cfg0.s.cfg_tx_swing = tx_swing;
2402 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(lane, qlm), tx_cfg0.u64);
2403 }
2404
2405 if ((tx_pre != -1) && (tx_post != -1)) {
2406 pre_emphasis.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_PRE_EMPHASIS(lane, qlm));
2407 pre_emphasis.s.cfg_tx_premptap = (tx_post << 4) | tx_pre;
2408 csr_wr_node(node, CVMX_GSERX_LANEX_TX_PRE_EMPHASIS(lane, qlm), pre_emphasis.u64);
2409 }
2410
2411 /* Apply TX gain settings */
2412 if (tx_gain != -1) {
2413 tx_cfg3.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm));
2414 tx_cfg3.s.pcs_sds_tx_gain = tx_gain;
2415 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm), tx_cfg3.u64);
2416 }
2417
2418	/* Apply TX vboost settings */
2419 if (tx_vboost != -1) {
2420 tx_cfg3.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm));
2421 tx_cfg3.s.cfg_tx_vboost_en = tx_vboost;
2422 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm), tx_cfg3.u64);
2423 }
2424
2425 /* 3) Program override for the Tx coefficient request */
2426 pcs_ctlifc_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm));
2427 if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
2428 pcs_ctlifc_0.s.cfg_tx_coeff_req_ovrrd_val = 0x1;
2429 if (tx_vboost != -1)
2430 pcs_ctlifc_0.s.cfg_tx_vboost_en_ovrrd_val = 1;
2431 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm), pcs_ctlifc_0.u64);
2432
2433 /* 4) Enable the Tx coefficient request override enable */
2434 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2435 if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
2436 pcs_ctlifc_2.s.cfg_tx_coeff_req_ovrrd_en = 0x1;
2437 if (tx_vboost != -1)
2438 pcs_ctlifc_2.s.cfg_tx_vboost_en_ovrrd_en = 1;
2439 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2440
2441 /* 5) Issue a Control Interface Configuration Override request to start the Tx equalizer */
2442 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2443 pcs_ctlifc_2.s.ctlifc_ovrrd_req = 0x1;
2444 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2445
2446 /* 6) Wait 1 ms for the request to complete */
2447 udelay(1000);
2448
2449	/* Steps 7 & 8 required for subsequent Tx swing and Tx equalization adjustment */
2450 /* 7) Disable the Tx coefficient request override enable */
2451 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2452 pcs_ctlifc_2.s.cfg_tx_coeff_req_ovrrd_en = 0;
2453 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2454 /* 8) Issue a Control Interface Configuration Override request */
2455 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2456 pcs_ctlifc_2.s.ctlifc_ovrrd_req = 0x1;
2457 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2458}
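/*
 * Worked constraint check (illustrative values): tx_swing=7, tx_pre=2 and
 * tx_post=4 pass both limits enforced above, since 2 + 4 - 7 = -1 <= 2 and
 * 2 + 4 + 7 = 13 <= 35. Passing -1 for tx_gain/tx_vboost leaves those
 * settings untouched:
 *
 *	octeon_qlm_tune_per_lane_v3(0, 4, 6250, 0, 7, 2, 4, -1, -1);
 */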
2459
2460/**
2461 * Some QLM speeds need to override the default tuning parameters
2462 *
2463 * @param node Node to configure
2464 * @param qlm QLM to configure
2465 * @param baud_mhz Desired speed in MHz
2466 * @param tx_swing	Voltage swing. The higher the value, the lower the voltage;
2467 * the default value is 7.
2468 * @param tx_premptap bits [3:0] pre-cursor pre-emphasis, bits [8:4] post-cursor
2469 * pre-emphasis.
2470 * @param tx_gain Transmit gain. Range 0-7
2471 * @param tx_vboost Transmit voltage boost. Range 0-1
2472 *
2473 */
2474void octeon_qlm_tune_v3(int node, int qlm, int baud_mhz, int tx_swing, int tx_premptap, int tx_gain,
2475 int tx_vboost)
2476{
2477 int lane;
2478 int num_lanes = cvmx_qlm_get_lanes(qlm);
2479
2480 for (lane = 0; lane < num_lanes; lane++) {
2481 int tx_pre = (tx_premptap == -1) ? -1 : tx_premptap & 0xf;
2482 int tx_post = (tx_premptap == -1) ? -1 : (tx_premptap >> 4) & 0x1f;
2483
2484 octeon_qlm_tune_per_lane_v3(node, qlm, baud_mhz, lane, tx_swing, tx_pre, tx_post,
2485 tx_gain, tx_vboost);
2486 }
2487}
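/*
 * tx_premptap packs both emphasis cursors, as the unpacking in the loop
 * above shows: bits [3:0] carry the pre-cursor and bits [8:4] the
 * post-cursor. For example (assumed values), pre=2 and post=0xd give
 * tx_premptap = (0xd << 4) | 0x2 = 0xd2:
 *
 *	octeon_qlm_tune_v3(0, 4, 6250, 7, 0xd2, -1, -1);
 */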
2488
2489/**
2490 * Some QLMs need to override the default pre-ctle for low loss channels.
2491 *
2492 * @param node Node to configure
2493 * @param qlm QLM to configure
2494 * @param pre_ctle pre-ctle settings for low loss channels
2495 */
2496void octeon_qlm_set_channel_v3(int node, int qlm, int pre_ctle)
2497{
2498 cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
2499
2500 lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
2501 lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = pre_ctle;
2502 csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);
2503}
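/*
 * Usage sketch (value assumed): override the minimum pre-CTLE gain for a
 * low-loss channel on QLM 4 of node 0, e.g. with the 0 that
 * octeon_qlm_dfe_disable() also programs:
 *
 *	octeon_qlm_set_channel_v3(0, 4, 0);
 */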
2504
2505static void __qlm_init_errata_20844(int node, int qlm)
2506{
2507 int lane;
2508
2509 /* Only applies to CN78XX pass 1.x */
2510 if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
2511 return;
2512
2513 /* Errata GSER-20844: Electrical Idle logic can coast
2514 * 1) After the link first comes up write the following
2515 * register on each lane to prevent the application logic
2516	 * from stomping on the Coast inputs. This is a one-time write,
2517 * or if you prefer you could put it in the link up loop and
2518 * write it every time the link comes up.
2519 * 1a) Then write GSER(0..13)_LANE(0..3)_PCS_CTLIFC_2
2520 * Set CTLIFC_OVRRD_REQ (later)
2521 * Set CFG_RX_CDR_COAST_REQ_OVRRD_EN
2522	 * It's not clear if #1 and #1a can be combined; let's try it
2523 * this way first.
2524 */
2525 for (lane = 0; lane < 4; lane++) {
2526 cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
2527 cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc_2;
2528
2529 ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2530 ctlifc_2.s.cfg_rx_cdr_coast_req_ovrrd_en = 1;
2531 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), ctlifc_2.u64);
2532
2533 misc_ovrrd.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
2534 misc_ovrrd.s.cfg_rx_eie_det_ovrrd_en = 1;
2535 misc_ovrrd.s.cfg_rx_eie_det_ovrrd_val = 0;
2536 csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm), misc_ovrrd.u64);
2537
2538 udelay(1);
2539
2540 misc_ovrrd.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
2541 misc_ovrrd.s.cfg_rx_eie_det_ovrrd_en = 1;
2542 misc_ovrrd.s.cfg_rx_eie_det_ovrrd_val = 1;
2543 csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm), misc_ovrrd.u64);
2544 ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2545 ctlifc_2.s.ctlifc_ovrrd_req = 1;
2546 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), ctlifc_2.u64);
2547 }
2548}
2549
2550/** CN78xx reference clock register settings */
2551struct refclk_settings_cn78xx {
2552	bool valid; /**< Reference clock speed supported */
2553 union cvmx_gserx_pll_px_mode_0 mode_0;
2554 union cvmx_gserx_pll_px_mode_1 mode_1;
2555 union cvmx_gserx_lane_px_mode_0 pmode_0;
2556 union cvmx_gserx_lane_px_mode_1 pmode_1;
2557};
2558
2559/** Default reference clock for various modes */
2560static const u8 def_ref_clk_cn78xx[R_NUM_LANE_MODES] = { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 };
2561
2562/**
2563 * This data structure stores the reference clock for each mode for each QLM.
2564 *
2565 * It is indexed first by the node number, then the QLM number and then the
2566 * lane mode. It is initialized to the default values.
2567 */
2568static u8 ref_clk_cn78xx[CVMX_MAX_NODES][8][R_NUM_LANE_MODES] = {
2569 { { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2570 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2571 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2572 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2573 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2574 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2575 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2576 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
2577 { { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2578 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2579 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2580 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2581 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2582 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2583 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2584 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
2585 { { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2586 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2587 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2588 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2589 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2590 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2591 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2592 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
2593 { { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2594 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2595 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2596 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2597 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2598 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2599 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
2600 { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } }
2601};
2602
2603/**
2604 * This data structure contains the register values for the cn78xx PLLs.
2605 * It is indexed first by the lane mode and second by the reference clock.
2606 * Note that not all combinations are supported.
2607 */
2608static const struct refclk_settings_cn78xx refclk_settings_cn78xx[R_NUM_LANE_MODES][4] = {
2609 { /* 0 R_2_5G_REFCLK100 */
2610 { /* 100MHz reference clock */
2611 .valid = true,
2612 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
2613 .mode_1.s = { .pll_16p5en = 0x0,
2614 .pll_cpadj = 0x2,
2615 .pll_pcie3en = 0x0,
2616 .pll_opr = 0x0,
2617 .pll_div = 0x19 },
2618 .pmode_0.s = { .ctle = 0x0,
2619 .pcie = 0x1,
2620 .tx_ldiv = 0x1,
2621 .rx_ldiv = 0x1,
2622 .srate = 0x0,
2623 .tx_mode = 0x3,
2624 .rx_mode = 0x3 },
2625 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2626 .vma_mm = 0x1,
2627 .cdr_fgain = 0xa,
2628 .ph_acc_adj = 0x14 } },
2629 { /* 125MHz reference clock */
2630 .valid = true,
2631 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
2632 .mode_1.s = { .pll_16p5en = 0x0,
2633 .pll_cpadj = 0x1,
2634 .pll_pcie3en = 0x0,
2635 .pll_opr = 0x0,
2636 .pll_div = 0x14 },
2637 .pmode_0.s = { .ctle = 0x0,
2638 .pcie = 0x1,
2639 .tx_ldiv = 0x1,
2640 .rx_ldiv = 0x1,
2641 .srate = 0x0,
2642 .tx_mode = 0x3,
2643 .rx_mode = 0x3 },
2644 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2645 .vma_mm = 0x1,
2646 .cdr_fgain = 0xa,
2647 .ph_acc_adj = 0x14 } },
2648 { /* 156.25MHz reference clock */
2649 .valid = true,
2650 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
2651 .mode_1.s = { .pll_16p5en = 0x0,
2652 .pll_cpadj = 0x2,
2653 .pll_pcie3en = 0x0,
2654 .pll_opr = 0x0,
2655 .pll_div = 0x10 },
2656 .pmode_0.s = { .ctle = 0x0,
2657 .pcie = 0x1,
2658 .tx_ldiv = 0x1,
2659 .rx_ldiv = 0x1,
2660 .srate = 0x0,
2661 .tx_mode = 0x3,
2662 .rx_mode = 0x3 },
2663 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2664 .vma_mm = 0x1,
2665 .cdr_fgain = 0xa,
2666 .ph_acc_adj = 0x14 } },
2667 {
2668 /* 161.1328125MHz reference clock */
2669 .valid = false,
2670 } },
2671 {
2672 /* 1 R_5G_REFCLK100 */
2673 { /* 100MHz reference clock */
2674 .valid = true,
2675 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2676 .mode_1.s = { .pll_16p5en = 0x0,
2677 .pll_cpadj = 0x2,
2678 .pll_pcie3en = 0x0,
2679 .pll_opr = 0x0,
2680 .pll_div = 0x19 },
2681 .pmode_0.s = { .ctle = 0x0,
2682 .pcie = 0x1,
2683 .tx_ldiv = 0x0,
2684 .rx_ldiv = 0x0,
2685 .srate = 0x0,
2686 .tx_mode = 0x3,
2687 .rx_mode = 0x3 },
2688 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2689 .vma_mm = 0x0,
2690 .cdr_fgain = 0xa,
2691 .ph_acc_adj = 0x14 } },
2692 { /* 125MHz reference clock */
2693 .valid = true,
2694 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2695 .mode_1.s = { .pll_16p5en = 0x0,
2696 .pll_cpadj = 0x1,
2697 .pll_pcie3en = 0x0,
2698 .pll_opr = 0x0,
2699 .pll_div = 0x14 },
2700 .pmode_0.s = { .ctle = 0x0,
2701 .pcie = 0x1,
2702 .tx_ldiv = 0x0,
2703 .rx_ldiv = 0x0,
2704 .srate = 0x0,
2705 .tx_mode = 0x3,
2706 .rx_mode = 0x3 },
2707 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2708 .vma_mm = 0x0,
2709 .cdr_fgain = 0xa,
2710 .ph_acc_adj = 0x14 } },
2711 { /* 156.25MHz reference clock */
2712 .valid = true,
2713 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2714 .mode_1.s = { .pll_16p5en = 0x0,
2715 .pll_cpadj = 0x2,
2716 .pll_pcie3en = 0x0,
2717 .pll_opr = 0x0,
2718 .pll_div = 0x10 },
2719 .pmode_0.s = { .ctle = 0x0,
2720 .pcie = 0x1,
2721 .tx_ldiv = 0x0,
2722 .rx_ldiv = 0x0,
2723 .srate = 0x0,
2724 .tx_mode = 0x3,
2725 .rx_mode = 0x3 },
2726 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2727 .vma_mm = 0x0,
2728 .cdr_fgain = 0xa,
2729 .ph_acc_adj = 0x14 } },
2730 {
2731 /* 161.1328125MHz reference clock */
2732 .valid = false,
2733 },
2734 },
2735 { /* 2 R_8G_REFCLK100 */
2736 { /* 100MHz reference clock */
2737 .valid = true,
2738 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2739 .mode_1.s = { .pll_16p5en = 0x0,
2740 .pll_cpadj = 0x2,
2741 .pll_pcie3en = 0x1,
2742 .pll_opr = 0x1,
2743 .pll_div = 0x28 },
2744 .pmode_0.s = { .ctle = 0x3,
2745 .pcie = 0x0,
2746 .tx_ldiv = 0x0,
2747 .rx_ldiv = 0x0,
2748 .srate = 0x0,
2749 .tx_mode = 0x3,
2750 .rx_mode = 0x3 },
2751 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2752 .vma_mm = 0x0,
2753 .cdr_fgain = 0xb,
2754 .ph_acc_adj = 0x23 } },
2755 { /* 125MHz reference clock */
2756 .valid = true,
2757 .mode_0.s = { .pll_icp = 0x2, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2758 .mode_1.s = { .pll_16p5en = 0x0,
2759 .pll_cpadj = 0x1,
2760 .pll_pcie3en = 0x1,
2761 .pll_opr = 0x1,
2762 .pll_div = 0x20 },
2763 .pmode_0.s = { .ctle = 0x3,
2764 .pcie = 0x0,
2765 .tx_ldiv = 0x0,
2766 .rx_ldiv = 0x0,
2767 .srate = 0x0,
2768 .tx_mode = 0x3,
2769 .rx_mode = 0x3 },
2770 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2771 .vma_mm = 0x0,
2772 .cdr_fgain = 0xb,
2773 .ph_acc_adj = 0x23 } },
2774 { /* 156.25MHz reference clock not supported */
2775 .valid = false } },
2776 {
2777 /* 3 R_125G_REFCLK15625_KX */
2778 { /* 100MHz reference */
2779 .valid = true,
2780 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2781 .mode_1.s = { .pll_16p5en = 0x1,
2782 .pll_cpadj = 0x2,
2783 .pll_pcie3en = 0x0,
2784 .pll_opr = 0x0,
2785 .pll_div = 0x19 },
2786 .pmode_0.s = { .ctle = 0x0,
2787 .pcie = 0x0,
2788 .tx_ldiv = 0x2,
2789 .rx_ldiv = 0x2,
2790 .srate = 0x0,
2791 .tx_mode = 0x3,
2792 .rx_mode = 0x3 },
2793 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2794 .vma_mm = 0x1,
2795 .cdr_fgain = 0xc,
2796 .ph_acc_adj = 0x1e } },
2797 { /* 125MHz reference */
2798 .valid = true,
2799 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2800 .mode_1.s = { .pll_16p5en = 0x1,
2801 .pll_cpadj = 0x2,
2802 .pll_pcie3en = 0x0,
2803 .pll_opr = 0x0,
2804 .pll_div = 0x14 },
2805 .pmode_0.s = { .ctle = 0x0,
2806 .pcie = 0x0,
2807 .tx_ldiv = 0x2,
2808 .rx_ldiv = 0x2,
2809 .srate = 0x0,
2810 .tx_mode = 0x3,
2811 .rx_mode = 0x3 },
2812 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2813 .vma_mm = 0x1,
2814 .cdr_fgain = 0xc,
2815 .ph_acc_adj = 0x1e } },
2816 { /* 156.25MHz reference */
2817 .valid = true,
2818 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2819 .mode_1.s = { .pll_16p5en = 0x1,
2820 .pll_cpadj = 0x3,
2821 .pll_pcie3en = 0x0,
2822 .pll_opr = 0x0,
2823 .pll_div = 0x10 },
2824 .pmode_0.s = { .ctle = 0x0,
2825 .pcie = 0x0,
2826 .tx_ldiv = 0x2,
2827 .rx_ldiv = 0x2,
2828 .srate = 0x0,
2829 .tx_mode = 0x3,
2830 .rx_mode = 0x3 },
2831 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2832 .vma_mm = 0x1,
2833 .cdr_fgain = 0xc,
2834 .ph_acc_adj = 0x1e } },
2835 {
2836 /* 161.1328125MHz reference clock */
2837 .valid = false,
2838 },
2839 },
2840 { /* 4 R_3125G_REFCLK15625_XAUI */
2841 { /* 100MHz reference */
2842 .valid = false },
2843 { /* 125MHz reference */
2844 .valid = true,
2845 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x14 },
2846 .mode_1.s = { .pll_16p5en = 0x1,
2847 .pll_cpadj = 0x2,
2848 .pll_pcie3en = 0x0,
2849 .pll_opr = 0x0,
2850 .pll_div = 0x19 },
2851 .pmode_0.s = { .ctle = 0x0,
2852 .pcie = 0x0,
2853 .tx_ldiv = 0x1,
2854 .rx_ldiv = 0x1,
2855 .srate = 0x0,
2856 .tx_mode = 0x3,
2857 .rx_mode = 0x3 },
2858 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2859 .vma_mm = 0x1,
2860 .cdr_fgain = 0xc,
2861 .ph_acc_adj = 0x1e } },
2862 { /* 156.25MHz reference, default */
2863 .valid = true,
2864 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x14 },
2865 .mode_1.s = { .pll_16p5en = 0x1,
2866 .pll_cpadj = 0x2,
2867 .pll_pcie3en = 0x0,
2868 .pll_opr = 0x0,
2869 .pll_div = 0x14 },
2870 .pmode_0.s = { .ctle = 0x0,
2871 .pcie = 0x0,
2872 .tx_ldiv = 0x1,
2873 .rx_ldiv = 0x1,
2874 .srate = 0x0,
2875 .tx_mode = 0x3,
2876 .rx_mode = 0x3 },
2877 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2878 .vma_mm = 0x1,
2879 .cdr_fgain = 0xc,
2880 .ph_acc_adj = 0x1e } },
2881 {
2882 /* 161.1328125MHz reference clock */
2883 .valid = false,
2884 } },
2885 { /* 5 R_103125G_REFCLK15625_KR */
2886 { /* 100MHz reference */
2887 .valid = false },
2888 { /* 125MHz reference */
2889 .valid = false },
2890 { /* 156.25MHz reference */
2891 .valid = true,
2892 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2893 .mode_1.s = { .pll_16p5en = 0x1,
2894 .pll_cpadj = 0x2,
2895 .pll_pcie3en = 0x0,
2896 .pll_opr = 0x1,
2897 .pll_div = 0x21 },
2898 .pmode_0.s = { .ctle = 0x3,
2899 .pcie = 0x0,
2900 .tx_ldiv = 0x0,
2901 .rx_ldiv = 0x0,
2902 .srate = 0x0,
2903 .tx_mode = 0x3,
2904 .rx_mode = 0x3 },
2905 .pmode_1.s = { .vma_fine_cfg_sel = 0x1,
2906 .vma_mm = 0x0,
2907 .cdr_fgain = 0xa,
2908 .ph_acc_adj = 0xf } },
2909	  { /* 161.1328125MHz reference */
2910 .valid = true,
2911 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2912 .mode_1.s = { .pll_16p5en = 0x1,
2913 .pll_cpadj = 0x2,
2914 .pll_pcie3en = 0x0,
2915 .pll_opr = 0x1,
2916 .pll_div = 0x20 },
2917 .pmode_0.s = { .ctle = 0x3,
2918 .pcie = 0x0,
2919 .tx_ldiv = 0x0,
2920 .rx_ldiv = 0x0,
2921 .srate = 0x0,
2922 .tx_mode = 0x3,
2923 .rx_mode = 0x3 },
2924 .pmode_1.s = { .vma_fine_cfg_sel = 0x1,
2925 .vma_mm = 0x0,
2926 .cdr_fgain = 0xa,
2927 .ph_acc_adj = 0xf } } },
2928 { /* 6 R_125G_REFCLK15625_SGMII */
2929 { /* 100MHz reference clock */
2930	    .valid = true,
2931 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2932 .mode_1.s = { .pll_16p5en = 0x1,
2933 .pll_cpadj = 0x2,
2934 .pll_pcie3en = 0x0,
2935 .pll_opr = 0x0,
2936 .pll_div = 0x19 },
2937 .pmode_0.s = { .ctle = 0x0,
2938 .pcie = 0x0,
2939 .tx_ldiv = 0x2,
2940 .rx_ldiv = 0x2,
2941 .srate = 0x0,
2942 .tx_mode = 0x3,
2943 .rx_mode = 0x3 },
2944 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2945 .vma_mm = 0x1,
2946 .cdr_fgain = 0xc,
2947 .ph_acc_adj = 0x1e } },
2948 { /* 125MHz reference clock */
2949	    .valid = true,
2950 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2951 .mode_1.s = { .pll_16p5en = 0x1,
2952 .pll_cpadj = 0x2,
2953 .pll_pcie3en = 0x0,
2954 .pll_opr = 0x0,
2955 .pll_div = 0x14 },
2956 .pmode_0.s = { .ctle = 0x0,
2957 .pcie = 0x0,
2958 .tx_ldiv = 0x2,
2959 .rx_ldiv = 0x2,
2960 .srate = 0x0,
2961 .tx_mode = 0x3,
2962 .rx_mode = 0x3 },
2963 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2964 .vma_mm = 0x0,
2965 .cdr_fgain = 0xc,
2966 .ph_acc_adj = 0x1e } },
2967 { /* 156.25MHz reference clock */
2968	    .valid = true,
2969 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2970 .mode_1.s = { .pll_16p5en = 0x1,
2971 .pll_cpadj = 0x3,
2972 .pll_pcie3en = 0x0,
2973 .pll_opr = 0x0,
2974 .pll_div = 0x10 },
2975 .pmode_0.s = { .ctle = 0x0,
2976 .pcie = 0x0,
2977 .tx_ldiv = 0x2,
2978 .rx_ldiv = 0x2,
2979 .srate = 0x0,
2980 .tx_mode = 0x3,
2981 .rx_mode = 0x3 },
2982 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2983 .vma_mm = 0x1,
2984 .cdr_fgain = 0xc,
2985 .ph_acc_adj = 0x1e } } },
2986 { /* 7 R_5G_REFCLK15625_QSGMII */
2987 { /* 100MHz reference */
2988 .valid = true,
2989 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2990 .mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x2, .pll_pcie3en = 0x0,
2991 .pll_div = 0x19 },
2992 .pmode_0.s = { .ctle = 0x0,
2993 .pcie = 0x0,
2994 .tx_ldiv = 0x0,
2995 .rx_ldiv = 0x0,
2996 .srate = 0x0,
2997 .tx_mode = 0x3,
2998 .rx_mode = 0x3 },
2999 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3000 .vma_mm = 0x1,
3001 .cdr_fgain = 0xc,
3002 .ph_acc_adj = 0x1e } },
3003 { /* 125MHz reference */
3004 .valid = true,
3005 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3006 .mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x1, .pll_pcie3en = 0x0,
3007 .pll_div = 0x14 },
3008 .pmode_0.s = { .ctle = 0x0,
3009 .pcie = 0x0,
3010 .tx_ldiv = 0x0,
3011 .rx_ldiv = 0x0,
3012 .srate = 0x0,
3013 .tx_mode = 0x3,
3014 .rx_mode = 0x3 },
3015 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3016 .vma_mm = 0x1,
3017 .cdr_fgain = 0xc,
3018 .ph_acc_adj = 0x1e } },
3019 { /* 156.25MHz reference */
3020 .valid = true,
3021 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3022 .mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x2, .pll_pcie3en = 0x0,
3023 .pll_div = 0x10 },
3024 .pmode_0.s = { .ctle = 0x0,
3025 .pcie = 0x0,
3026 .tx_ldiv = 0x0,
3027 .rx_ldiv = 0x0,
3028 .srate = 0x0,
3029 .tx_mode = 0x3,
3030 .rx_mode = 0x3 },
3031 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3032 .vma_mm = 0x1,
3033 .cdr_fgain = 0xc,
3034 .ph_acc_adj = 0x1e } },
3035 {
3036 /* 161.1328125MHz reference clock */
3037 .valid = false,
3038 } },
3039 { /* 8 R_625G_REFCLK15625_RXAUI */
3040 { /* 100MHz reference */
3041 .valid = false },
3042 { /* 125MHz reference */
3043 .valid = true,
3044 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3045 .mode_1.s = { .pll_16p5en = 0x0,
3046 .pll_cpadj = 0x2,
3047 .pll_pcie3en = 0x0,
3048 .pll_opr = 0x0,
3049 .pll_div = 0x19 },
3050 .pmode_0.s = { .ctle = 0x0,
3051 .pcie = 0x0,
3052 .tx_ldiv = 0x0,
3053 .rx_ldiv = 0x0,
3054 .srate = 0x0,
3055 .tx_mode = 0x3,
3056 .rx_mode = 0x3 },
3057 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3058 .vma_mm = 0x0,
3059 .cdr_fgain = 0xa,
3060 .ph_acc_adj = 0x14 } },
3061 { /* 156.25MHz reference */
3062 .valid = true,
3063 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3064 .mode_1.s = { .pll_16p5en = 0x0,
3065 .pll_cpadj = 0x2,
3066 .pll_pcie3en = 0x0,
3067 .pll_opr = 0x0,
3068 .pll_div = 0x14 },
3069 .pmode_0.s = { .ctle = 0x0,
3070 .pcie = 0x0,
3071 .tx_ldiv = 0x0,
3072 .rx_ldiv = 0x0,
3073 .srate = 0x0,
3074 .tx_mode = 0x3,
3075 .rx_mode = 0x3 },
3076 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3077 .vma_mm = 0x0,
3078 .cdr_fgain = 0xa,
3079 .ph_acc_adj = 0x14 } },
3080	  { /* 161.1328125MHz reference */
3081 .valid = true,
3082 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3083 .mode_1.s = { .pll_16p5en = 0x0,
3084 .pll_cpadj = 0x2,
3085 .pll_pcie3en = 0x0,
3086 .pll_opr = 0x0,
3087 .pll_div = 0x14 },
3088 .pmode_0.s = { .ctle = 0x0,
3089 .pcie = 0x0,
3090 .tx_ldiv = 0x0,
3091 .rx_ldiv = 0x0,
3092 .srate = 0x0,
3093 .tx_mode = 0x3,
3094 .rx_mode = 0x3 },
3095 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3096 .vma_mm = 0x0,
3097 .cdr_fgain = 0xa,
3098 .ph_acc_adj = 0x14 } } },
3099 { /* 9 R_2_5G_REFCLK125 */
3100 { /* 100MHz reference */
3101 .valid = true,
3102 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
3103 .mode_1.s = { .pll_16p5en = 0x0,
3104 .pll_cpadj = 0x2,
3105 .pll_pcie3en = 0x0,
3106 .pll_opr = 0x0,
3107 .pll_div = 0x19 },
3108 .pmode_0.s = { .ctle = 0x0,
3109 .pcie = 0x1,
3110 .tx_ldiv = 0x1,
3111 .rx_ldiv = 0x1,
3112 .srate = 0x0,
3113 .tx_mode = 0x3,
3114 .rx_mode = 0x3 },
3115 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3116 .vma_mm = 0x1,
3117 .cdr_fgain = 0xa,
3118 .ph_acc_adj = 0x14 } },
3119 { /* 125MHz reference */
3120 .valid = true,
3121 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
3122 .mode_1.s = { .pll_16p5en = 0x0,
3123 .pll_cpadj = 0x1,
3124 .pll_pcie3en = 0x0,
3125 .pll_opr = 0x0,
3126 .pll_div = 0x14 },
3127 .pmode_0.s = { .ctle = 0x0,
3128 .pcie = 0x1,
3129 .tx_ldiv = 0x1,
3130 .rx_ldiv = 0x1,
3131 .srate = 0x0,
3132 .tx_mode = 0x3,
3133 .rx_mode = 0x3 },
3134 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3135 .vma_mm = 0x1,
3136 .cdr_fgain = 0xa,
3137 .ph_acc_adj = 0x14 } },
3138	  { /* 156.25MHz reference */
3139 .valid = true,
3140 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
3141 .mode_1.s = { .pll_16p5en = 0x0,
3142 .pll_cpadj = 0x2,
3143 .pll_pcie3en = 0x0,
3144 .pll_opr = 0x0,
3145 .pll_div = 0x10 },
3146 .pmode_0.s = { .ctle = 0x0,
3147 .pcie = 0x1,
3148 .tx_ldiv = 0x1,
3149 .rx_ldiv = 0x1,
3150 .srate = 0x0,
3151 .tx_mode = 0x3,
3152 .rx_mode = 0x3 },
3153 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3154 .vma_mm = 0x1,
3155 .cdr_fgain = 0xa,
3156 .ph_acc_adj = 0x14 } },
3157 {
3158 /* 161.1328125MHz reference clock */
3159 .valid = false,
3160 } },
3161 { /* 0xa R_5G_REFCLK125 */
3162 { /* 100MHz reference */
3163 .valid = true,
3164 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3165 .mode_1.s = { .pll_16p5en = 0x0,
3166 .pll_cpadj = 0x2,
3167 .pll_pcie3en = 0x0,
3168 .pll_opr = 0x0,
3169 .pll_div = 0x19 },
3170 .pmode_0.s = { .ctle = 0x0,
3171 .pcie = 0x1,
3172 .tx_ldiv = 0x0,
3173 .rx_ldiv = 0x0,
3174 .srate = 0x0,
3175 .tx_mode = 0x3,
3176 .rx_mode = 0x3 },
3177 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3178 .vma_mm = 0x0,
3179 .cdr_fgain = 0xa,
3180 .ph_acc_adj = 0x14 } },
3181 { /* 125MHz reference */
3182 .valid = true,
3183 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3184 .mode_1.s = { .pll_16p5en = 0x0,
3185 .pll_cpadj = 0x1,
3186 .pll_pcie3en = 0x0,
3187 .pll_opr = 0x0,
3188 .pll_div = 0x14 },
3189 .pmode_0.s = { .ctle = 0x0,
3190 .pcie = 0x1,
3191 .tx_ldiv = 0x0,
3192 .rx_ldiv = 0x0,
3193 .srate = 0x0,
3194 .tx_mode = 0x3,
3195 .rx_mode = 0x3 },
3196 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3197 .vma_mm = 0x0,
3198 .cdr_fgain = 0xa,
3199 .ph_acc_adj = 0x14 } },
3200 { /* 156.25MHz reference */
3201 .valid = true,
3202 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3203 .mode_1.s = { .pll_16p5en = 0x0,
3204 .pll_cpadj = 0x2,
3205 .pll_pcie3en = 0x0,
3206 .pll_opr = 0x0,
3207 .pll_div = 0x10 },
3208 .pmode_0.s = { .ctle = 0x0,
3209 .pcie = 0x1,
3210 .tx_ldiv = 0x0,
3211 .rx_ldiv = 0x0,
3212 .srate = 0x0,
3213 .tx_mode = 0x3,
3214 .rx_mode = 0x3 },
3215 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3216 .vma_mm = 0x0,
3217 .cdr_fgain = 0xa,
3218 .ph_acc_adj = 0x14 } },
3219 {
3220 /* 161.1328125MHz reference clock */
3221 .valid = false,
3222 } },
3223 { /* 0xb R_8G_REFCLK125 */
3224 { /* 100MHz reference */
3225 .valid = true,
3226 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
3227 .mode_1.s = { .pll_16p5en = 0x0,
3228 .pll_cpadj = 0x2,
3229 .pll_pcie3en = 0x1,
3230 .pll_opr = 0x1,
3231 .pll_div = 0x28 },
3232 .pmode_0.s = { .ctle = 0x3,
3233 .pcie = 0x0,
3234 .tx_ldiv = 0x0,
3235 .rx_ldiv = 0x0,
3236 .srate = 0x0,
3237 .tx_mode = 0x3,
3238 .rx_mode = 0x3 },
3239 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3240 .vma_mm = 0x0,
3241 .cdr_fgain = 0xb,
3242 .ph_acc_adj = 0x23 } },
3243 { /* 125MHz reference */
3244 .valid = true,
3245 .mode_0.s = { .pll_icp = 0x2, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
3246 .mode_1.s = { .pll_16p5en = 0x0,
3247 .pll_cpadj = 0x1,
3248 .pll_pcie3en = 0x1,
3249 .pll_opr = 0x1,
3250 .pll_div = 0x20 },
3251 .pmode_0.s = { .ctle = 0x3,
3252 .pcie = 0x0,
3253 .tx_ldiv = 0x0,
3254 .rx_ldiv = 0x0,
3255 .srate = 0x0,
3256 .tx_mode = 0x3,
3257 .rx_mode = 0x3 },
3258 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3259 .vma_mm = 0x0,
3260 .cdr_fgain = 0xb,
3261 .ph_acc_adj = 0x23 } },
3262 { /* 156.25MHz reference */
3263 .valid = false },
3264 {
3265 /* 161.1328125MHz reference clock */
3266 .valid = false,
3267 } }
3268};
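/*
 * Lookup sketch (illustrative): the table is indexed by lane mode first and
 * reference-clock selector second, with .valid gating unsupported pairings.
 * For example, R_8G_REFCLK125 with a 156.25 MHz reference (selector 2) is
 * marked invalid above:
 *
 *	const struct refclk_settings_cn78xx *clk =
 *		&refclk_settings_cn78xx[R_8G_REFCLK125][2];
 *	if (!clk->valid)
 *		return -1;	// combination not supported
 */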
3269
3270/**
3271 * Set a non-standard reference clock for a node, qlm and lane mode.
3272 *
3273 * @INTERNAL
3274 *
3275 * @param node node number the reference clock is used with
3276 * @param qlm qlm number the reference clock is hooked up to
3277 * @param lane_mode current lane mode selected for the QLM
3278 * @param ref_clk_sel 0 = 100MHz, 1 = 125MHz, 2 = 156.25MHz,
3279 * 3 = 161.1328125MHz
3280 *
3281 * Return: 0 for success or -1 if the reference clock selector is not supported
3282 *
3283 * NOTE: This must be called before __qlm_setup_pll_cn78xx.
3284 */
3285static int __set_qlm_ref_clk_cn78xx(int node, int qlm, int lane_mode, int ref_clk_sel)
3286{
3287 if (ref_clk_sel > 3 || ref_clk_sel < 0 ||
3288 !refclk_settings_cn78xx[lane_mode][ref_clk_sel].valid) {
3289 debug("%s: Invalid reference clock %d for lane mode %d for node %d, QLM %d\n",
3290 __func__, ref_clk_sel, lane_mode, node, qlm);
3291 return -1;
3292 }
3293 debug("%s(%d, %d, 0x%x, %d)\n", __func__, node, qlm, lane_mode, ref_clk_sel);
3294 ref_clk_cn78xx[node][qlm][lane_mode] = ref_clk_sel;
3295 return 0;
3296}
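/*
 * Usage sketch (assumed board setup): a design that feeds QLM3 on node 0
 * with a 125 MHz reference for SGMII would record that choice before PLL
 * setup, since the PLL code consults ref_clk_cn78xx[][][]:
 *
 *	if (__set_qlm_ref_clk_cn78xx(0, 3, R_125G_REFCLK15625_SGMII, 1))
 *		printf("125 MHz not supported for this lane mode\n");
 */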
3297
3298/**
3299 * KR - Inverted Tx Coefficient Direction Change. Changing Pre & Post Tap inc/dec direction
3300 * KR - Inverted Tx Coefficient Direction Change: changes the Pre & Post
3301 * Tap inc/dec direction.
3302 *
3303 *
3304 * @param node Node number to configure
3305 * @param qlm QLM number to configure
3306 */
3307static void __qlm_kr_inc_dec_gser26636(int node, int qlm)
3308{
3309 cvmx_gserx_rx_txdir_ctrl_1_t rx_txdir_ctrl;
3310
3311 /* Apply workaround for Errata GSER-26636,
3312 * KR training coefficient update inverted
3313 */
3314 rx_txdir_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm));
3315 rx_txdir_ctrl.s.rx_precorr_chg_dir = 1;
3316 rx_txdir_ctrl.s.rx_tap1_chg_dir = 1;
3317 csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm), rx_txdir_ctrl.u64);
3318}
3319
3320/**
3321 * Updating the RX EQ settings to support wider temperature range
3322 * @INTERNAL
3323 *
3324 * @param node Node number to configure
3325 * @param qlm QLM number to configure
3326 */
3327static void __qlm_rx_eq_temp_gser27140(int node, int qlm)
3328{
3329 int lane;
3330 int num_lanes = cvmx_qlm_get_lanes(qlm);
3331 cvmx_gserx_lanex_rx_valbbd_ctrl_0_t rx_valbbd_ctrl_0;
3332 cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
3333 cvmx_gserx_lane_vma_fine_ctrl_0_t lane_vma_fine_ctrl_0;
3334 cvmx_gserx_rx_txdir_ctrl_1_t rx_txdir_ctrl_1;
3335 cvmx_gserx_eq_wait_time_t eq_wait_time;
3336 cvmx_gserx_rx_txdir_ctrl_2_t rx_txdir_ctrl_2;
3337 cvmx_gserx_rx_txdir_ctrl_0_t rx_txdir_ctrl_0;
3338
3339 for (lane = 0; lane < num_lanes; lane++) {
3340 rx_valbbd_ctrl_0.u64 =
3341 csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(lane, qlm));
3342 rx_valbbd_ctrl_0.s.agc_gain = 3;
3343 rx_valbbd_ctrl_0.s.dfe_gain = 2;
3344 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(lane, qlm),
3345 rx_valbbd_ctrl_0.u64);
3346 }
3347
3348 /* do_pre_ctle_limits_work_around: */
3349 lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
3350 //lane_vma_fine_ctrl_2.s.rx_prectle_peak_max_fine = 11;
3351 lane_vma_fine_ctrl_2.s.rx_prectle_gain_max_fine = 11;
3352 //lane_vma_fine_ctrl_2.s.rx_prectle_peak_min_fine = 6;
3353 lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = 6;
3354 csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);
3355
3356 /* do_inc_dec_thres_work_around: */
3357 rx_txdir_ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_0(qlm));
3358 rx_txdir_ctrl_0.s.rx_boost_hi_thrs = 11;
3359 rx_txdir_ctrl_0.s.rx_boost_lo_thrs = 4;
3360 rx_txdir_ctrl_0.s.rx_boost_hi_val = 15;
3361 csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_0(qlm), rx_txdir_ctrl_0.u64);
3362
3363 /* do_sdll_iq_work_around: */
3364 lane_vma_fine_ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_0(qlm));
3365 lane_vma_fine_ctrl_0.s.rx_sdll_iq_max_fine = 14;
3366 lane_vma_fine_ctrl_0.s.rx_sdll_iq_min_fine = 8;
3367 lane_vma_fine_ctrl_0.s.rx_sdll_iq_step_fine = 2;
3368
3369 /* do_vma_window_work_around_2: */
3370 lane_vma_fine_ctrl_0.s.vma_window_wait_fine = 5;
3371 lane_vma_fine_ctrl_0.s.lms_wait_time_fine = 5;
3372
3373 csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_0(qlm), lane_vma_fine_ctrl_0.u64);
3374
3375 /* Set dfe_tap_1_lo_thres_val: */
3376 rx_txdir_ctrl_1.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm));
3377 rx_txdir_ctrl_1.s.rx_tap1_lo_thrs = 8;
3378 rx_txdir_ctrl_1.s.rx_tap1_hi_thrs = 0x17;
3379 csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm), rx_txdir_ctrl_1.u64);
3380
3381 /* do_rxeq_wait_cnt_work_around: */
3382 eq_wait_time.u64 = csr_rd_node(node, CVMX_GSERX_EQ_WAIT_TIME(qlm));
3383 eq_wait_time.s.rxeq_wait_cnt = 6;
3384 csr_wr_node(node, CVMX_GSERX_EQ_WAIT_TIME(qlm), eq_wait_time.u64);
3385
3386 /* do_write_rx_txdir_precorr_thresholds: */
3387 rx_txdir_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_2(qlm));
3388 rx_txdir_ctrl_2.s.rx_precorr_hi_thrs = 0xc0;
3389 rx_txdir_ctrl_2.s.rx_precorr_lo_thrs = 0x40;
3390 csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_2(qlm), rx_txdir_ctrl_2.u64);
3391}
3392
3393/* Errata GSER-26150: 10G PHY PLL Temperature Failure
3394 * This workaround must be completed after the final deassertion of
3395 * GSERx_PHY_CTL[PHY_RESET]
3396 */
3397static int __qlm_errata_gser_26150(int node, int qlm, int is_pcie)
3398{
3399 int num_lanes = 4;
3400 int i;
3401 cvmx_gserx_glbl_pll_cfg_3_t pll_cfg_3;
3402 cvmx_gserx_glbl_misc_config_1_t misc_config_1;
3403
3404 /* PCIe only requires the LC-VCO parameters to be updated */
3405 if (is_pcie) {
3406 /* Update PLL parameters */
3407 /* Step 1: Set GSER()_GLBL_PLL_CFG_3[PLL_VCTRL_SEL_LCVCO_VAL] = 0x2, and
3408 * GSER()_GLBL_PLL_CFG_3[PCS_SDS_PLL_VCO_AMP] = 0
3409 */
3410 pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
3411 pll_cfg_3.s.pcs_sds_pll_vco_amp = 0;
3412 pll_cfg_3.s.pll_vctrl_sel_lcvco_val = 2;
3413 csr_wr_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm), pll_cfg_3.u64);
3414
3415 /* Step 2: Set GSER()_GLBL_MISC_CONFIG_1[PCS_SDS_TRIM_CHP_REG] = 0x2. */
3416 misc_config_1.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm));
3417 misc_config_1.s.pcs_sds_trim_chp_reg = 2;
3418 csr_wr_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm), misc_config_1.u64);
3419 return 0;
3420 }
3421
3422 /* Applying this errata twice causes problems */
3423 pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
3424 if (pll_cfg_3.s.pll_vctrl_sel_lcvco_val == 0x2)
3425 return 0;
3426
3427 /* (GSER-26150) 10 Gb temperature excursions can cause lock failure */
3428 /* Change the calibration point of the VCO at start up to shift some
3429 * available range of the VCO from -deltaT direction to the +deltaT
3430 * ramp direction allowing a greater range of VCO temperatures before
3431 * experiencing the failure.
3432 */
3433
3434 /* Check for DLMs on CN73XX and CNF75XX */
3435 if (OCTEON_IS_MODEL(OCTEON_CN73XX) && (qlm == 5 || qlm == 6))
3436 num_lanes = 2;
3437
3438	/* Put the PHY in the P2 power-down state. All lanes in a
3439	 * QLM/DLM must be powered down to force the PHY to P2.
3440 */
3441 for (i = 0; i < num_lanes; i++) {
3442 cvmx_gserx_lanex_pcs_ctlifc_0_t ctlifc0;
3443 cvmx_gserx_lanex_pcs_ctlifc_1_t ctlifc1;
3444 cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;
3445
3446		/* Step 1: Set GSER()_LANE(lane_n)_PCS_CTLIFC_0[CFG_TX_PSTATE_REQ_OVERRD_VAL]
3447 * = 0x3
3448 * Select P2 power state for Tx lane
3449 */
3450 ctlifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm));
3451 ctlifc0.s.cfg_tx_pstate_req_ovrrd_val = 0x3;
3452 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm), ctlifc0.u64);
3453 /* Step 2: Set GSER()_LANE(lane_n)_PCS_CTLIFC_1[CFG_RX_PSTATE_REQ_OVERRD_VAL]
3454 * = 0x3
3455 * Select P2 power state for Rx lane
3456 */
3457 ctlifc1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm));
3458 ctlifc1.s.cfg_rx_pstate_req_ovrrd_val = 0x3;
3459 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm), ctlifc1.u64);
3460 /* Step 3: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 1
3461 * Enable Tx power state override and Set
3462 * GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 1
3463 * Enable Rx power state override
3464 */
3465 ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
3466 ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
3467 ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x1;
3468 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
3469 /* Step 4: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ] = 1
3470 * Start the CTLIFC override state machine
3471 */
3472 ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
3473 ctlifc2.s.ctlifc_ovrrd_req = 0x1;
3474 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
3475 }
3476
3477 /* Update PLL parameters */
3478 /* Step 5: Set GSER()_GLBL_PLL_CFG_3[PLL_VCTRL_SEL_LCVCO_VAL] = 0x2, and
3479 * GSER()_GLBL_PLL_CFG_3[PCS_SDS_PLL_VCO_AMP] = 0
3480 */
3481 pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
3482 pll_cfg_3.s.pcs_sds_pll_vco_amp = 0;
3483 pll_cfg_3.s.pll_vctrl_sel_lcvco_val = 2;
3484 csr_wr_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm), pll_cfg_3.u64);
3485
3486 /* Step 6: Set GSER()_GLBL_MISC_CONFIG_1[PCS_SDS_TRIM_CHP_REG] = 0x2. */
3487 misc_config_1.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm));
3488 misc_config_1.s.pcs_sds_trim_chp_reg = 2;
3489 csr_wr_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm), misc_config_1.u64);
3490
3491	/* Wake up the PHY and transition to the P0 power-up state to bring up the lanes;
3492	 * all PHY lanes need to be woken up
3493 */
3494 for (i = 0; i < num_lanes; i++) {
3495 cvmx_gserx_lanex_pcs_ctlifc_0_t ctlifc0;
3496 cvmx_gserx_lanex_pcs_ctlifc_1_t ctlifc1;
3497 cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;
3498 /* Step 7: Set GSER()_LANE(lane_n)_PCS_CTLIFC_0[CFG_TX_PSTATE_REQ_OVERRD_VAL] = 0x0
3499 * Select P0 power state for Tx lane
3500 */
3501 ctlifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm));
3502 ctlifc0.s.cfg_tx_pstate_req_ovrrd_val = 0x0;
3503 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm), ctlifc0.u64);
3504 /* Step 8: Set GSER()_LANE(lane_n)_PCS_CTLIFC_1[CFG_RX_PSTATE_REQ_OVERRD_VAL] = 0x0
3505 * Select P0 power state for Rx lane
3506 */
3507 ctlifc1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm));
3508 ctlifc1.s.cfg_rx_pstate_req_ovrrd_val = 0x0;
3509 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm), ctlifc1.u64);
3510 /* Step 9: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 1
3511 * Enable Tx power state override and Set
3512 * GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 1
3513 * Enable Rx power state override
3514 */
3515 ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
3516 ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
3517 ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x1;
3518 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
3519 /* Step 10: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ] = 1
3520 * Start the CTLIFC override state machine
3521 */
3522 ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
3523 ctlifc2.s.ctlifc_ovrrd_req = 0x1;
3524 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
3525 }
3526
3527 /* Step 11: Wait 10 msec */
3528 mdelay(10);
3529
3530 /* Release Lane Tx/Rx Power state override enables. */
3531 for (i = 0; i < num_lanes; i++) {
3532 cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;
3533
3534 ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
3535 ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x0;
3536 ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x0;
3537 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
3538 }
3539
3540 /* Step 12: Poll GSER()_PLL_STAT.[PLL_LOCK] = 1
3541 * Poll and check that PLL is locked
3542 */
3543 if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
3544 pll_lock, ==, 1, 10000)) {
3545 printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", node, qlm);
3546 return -1;
3547 }
3548
3549 /* Step 13: Poll GSER()_QLM_STAT.[RST_RDY] = 1
3550 * Poll and check that QLM/DLM is Ready
3551 */
3552 if (is_pcie == 0 &&
3553 CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
3554 rst_rdy, ==, 1, 10000)) {
3555 printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", node, qlm);
3556 return -1;
3557 }
3558
3559 return 0;
3560}
3561
3562/**
3563 * Configure all of the PLLs for a particular node and qlm
3564 * @INTERNAL
3565 *
3566 * @param node Node number to configure
3567 * @param qlm QLM number to configure
3568 */
3569static void __qlm_setup_pll_cn78xx(int node, int qlm)
3570{
3571 cvmx_gserx_pll_px_mode_0_t mode_0;
3572 cvmx_gserx_pll_px_mode_1_t mode_1;
3573 cvmx_gserx_lane_px_mode_0_t pmode_0;
3574 cvmx_gserx_lane_px_mode_1_t pmode_1;
3575 int lane_mode;
3576 int ref_clk;
3577 const struct refclk_settings_cn78xx *clk_settings;
3578
3579 for (lane_mode = 0; lane_mode < R_NUM_LANE_MODES; lane_mode++) {
3580 mode_0.u64 = csr_rd_node(node, CVMX_GSERX_PLL_PX_MODE_0(lane_mode, qlm));
3581 mode_1.u64 = csr_rd_node(node, CVMX_GSERX_PLL_PX_MODE_1(lane_mode, qlm));
3582 pmode_0.u64 = 0;
3583 pmode_1.u64 = 0;
3584 ref_clk = ref_clk_cn78xx[node][qlm][lane_mode];
3585 clk_settings = &refclk_settings_cn78xx[lane_mode][ref_clk];
3586 debug("%s(%d, %d): lane_mode: 0x%x, ref_clk: %d\n", __func__, node, qlm, lane_mode,
3587 ref_clk);
3588
3589 if (!clk_settings->valid) {
3590 printf("%s: Error: reference clock %d is not supported for lane mode %d on qlm %d\n",
3591 __func__, ref_clk, lane_mode, qlm);
3592 continue;
3593 }
3594
3595 mode_0.s.pll_icp = clk_settings->mode_0.s.pll_icp;
3596 mode_0.s.pll_rloop = clk_settings->mode_0.s.pll_rloop;
3597 mode_0.s.pll_pcs_div = clk_settings->mode_0.s.pll_pcs_div;
3598
3599 mode_1.s.pll_16p5en = clk_settings->mode_1.s.pll_16p5en;
3600 mode_1.s.pll_cpadj = clk_settings->mode_1.s.pll_cpadj;
3601 mode_1.s.pll_pcie3en = clk_settings->mode_1.s.pll_pcie3en;
3602 mode_1.s.pll_opr = clk_settings->mode_1.s.pll_opr;
3603 mode_1.s.pll_div = clk_settings->mode_1.s.pll_div;
3604
3605 pmode_0.u64 = clk_settings->pmode_0.u64;
3606
3607 pmode_1.u64 = clk_settings->pmode_1.u64;
3608
3609 csr_wr_node(node, CVMX_GSERX_PLL_PX_MODE_1(lane_mode, qlm), mode_1.u64);
3610 csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_0(lane_mode, qlm), pmode_0.u64);
3611 csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_1(lane_mode, qlm), pmode_1.u64);
3612 csr_wr_node(node, CVMX_GSERX_PLL_PX_MODE_0(lane_mode, qlm), mode_0.u64);
3613 }
3614}
3615
3616/**
3617 * Get the lane mode for the specified node and QLM.
3618 *
3619 * @param ref_clk_sel The reference-clock selection to use to configure QLM
3620 * 0 = REF_100MHZ
3621 * 1 = REF_125MHZ
3622 *			2 = REF_156MHZ
 *			3 = REF_161MHZ
3623 * @param baud_mhz   The speed the QLM needs to be configured, in MHz.
3624 * @param[out] alt_pll_settings If non-NULL this will be set if non-default PLL
3625 * settings are required for the mode.
3626 *
3627 * Return: lane mode to use or -1 on error
3628 *
3629 * NOTE: In some modes a non-default reference clock requires alternate PLL
 * settings; this is reported through alt_pll_settings.
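 *
 * Example (derived from the switch below): baud_mhz = 1250 with
 * ref_clk_sel = 2 (156.25 MHz) resolves to R_125G_REFCLK15625_SGMII
 * with no alternate PLL settings.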
3630 */
3631static int __get_lane_mode_for_speed_and_ref_clk(int ref_clk_sel, int baud_mhz,
3632 bool *alt_pll_settings)
3633{
3634 if (alt_pll_settings)
3635 *alt_pll_settings = false;
3636 switch (baud_mhz) {
3637 case 98304:
3638 case 49152:
3639 case 24576:
3640 case 12288:
3641 if (ref_clk_sel != 3) {
3642 printf("Error: Invalid ref clock\n");
3643 return -1;
3644 }
3645 return 0x5;
3646 case 6144:
3647 case 3072:
3648 if (ref_clk_sel != 3) {
3649 printf("Error: Invalid ref clock\n");
3650 return -1;
3651 }
3652 return 0x8;
3653 case 1250:
3654 if (alt_pll_settings)
3655 *alt_pll_settings = (ref_clk_sel != 2);
3656 return R_125G_REFCLK15625_SGMII;
3657 case 2500:
3658 if (ref_clk_sel == 0)
3659 return R_2_5G_REFCLK100;
3660
3661 if (alt_pll_settings)
3662 *alt_pll_settings = (ref_clk_sel != 1);
3663 return R_2_5G_REFCLK125;
3664 case 3125:
3665 if (ref_clk_sel == 2) {
3666 return R_3125G_REFCLK15625_XAUI;
3667 } else if (ref_clk_sel == 1) {
3668 if (alt_pll_settings)
3669 *alt_pll_settings = true;
3670 return R_3125G_REFCLK15625_XAUI;
3671 }
3672
3673 printf("Error: Invalid speed\n");
3674 return -1;
3675 case 5000:
3676 if (ref_clk_sel == 0) {
3677 return R_5G_REFCLK100;
3678 } else if (ref_clk_sel == 1) {
3679 if (alt_pll_settings)
3680 *alt_pll_settings = (ref_clk_sel != 1);
3681 return R_5G_REFCLK125;
3682 } else {
3683 return R_5G_REFCLK15625_QSGMII;
3684 }
3685 case 6250:
3686 if (ref_clk_sel != 0) {
3687 if (alt_pll_settings)
3688 *alt_pll_settings = (ref_clk_sel != 2);
3689 return R_625G_REFCLK15625_RXAUI;
3690 }
3691
3692 printf("Error: Invalid speed\n");
3693 return -1;
3694 case 6316:
3695		if (ref_clk_sel != 3) {
3696			printf("Error: Invalid ref clock\n");
			return -1;
3697		}
		if (alt_pll_settings)
3698			*alt_pll_settings = true;
3699		return R_625G_REFCLK15625_RXAUI;
3701 case 8000:
3702 if (ref_clk_sel == 0)
3703 return R_8G_REFCLK100;
3704 else if (ref_clk_sel == 1)
3705 return R_8G_REFCLK125;
3706
3707 printf("Error: Invalid speed\n");
3708 return -1;
3709 case 103125:
3710 if (ref_clk_sel == 3 && alt_pll_settings)
3711 *alt_pll_settings = true;
3712
3713 if (ref_clk_sel == 2 || ref_clk_sel == 3)
3714 return R_103125G_REFCLK15625_KR;
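		/* Other reference clocks fall through to the error path below */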
3715
3716 default:
3717 printf("Error: Invalid speed\n");
3718 return -1;
3719 }
3720
3721 return -1;
3722}
3723
3724/*
3725 * Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
3726 * during speed change. Change SLI_WINDOW_CTL[time] to 525us
3727 */
3728static void __set_sli_window_ctl_errata_31375(int node)
3729{
3730 if (OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX) ||
3731 OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
3732 cvmx_sli_window_ctl_t window_ctl;
3733
3734 window_ctl.u64 = csr_rd_node(node, CVMX_PEXP_SLI_WINDOW_CTL);
3735 /* Configure SLI_WINDOW_CTL only once */
3736 if (window_ctl.s.time != 8191)
3737 return;
3738
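		/* Express 525 us in bus-clock cycles (assuming gd->bus_clk is in Hz) */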
3739 window_ctl.s.time = gd->bus_clk * 525ull / 1000000;
3740 csr_wr_node(node, CVMX_PEXP_SLI_WINDOW_CTL, window_ctl.u64);
3741 }
3742}
3743
3744static void __cvmx_qlm_pcie_errata_ep_cn78xx(int node, int pem)
3745{
3746 cvmx_pciercx_cfg031_t cfg031;
3747 cvmx_pciercx_cfg032_t cfg032;
3748 cvmx_pciercx_cfg040_t cfg040;
3749 cvmx_pemx_cfg_t pemx_cfg;
3750 cvmx_pemx_on_t pemx_on;
3751 int low_qlm, high_qlm;
3752 int qlm, lane;
3753 u64 start_cycle;
3754
3755 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(pem));
3756
3757 /* Errata (GSER-21178) PCIe gen3 doesn't work, continued */
3758
3759 /* Wait for the link to come up as Gen1 */
3760 printf("PCIe%d: Waiting for EP out of reset\n", pem);
3761 while (pemx_on.s.pemoor == 0) {
3762 udelay(1000);
3763 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(pem));
3764 }
3765
3766 /* Enable gen3 speed selection */
3767 printf("PCIe%d: Enabling Gen3 for EP\n", pem);
3768	/* Now reprogram the link for Gen3 operation */
3769 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
3770 pemx_cfg.s.md = 2;
3771 csr_wr_node(node, CVMX_PEMX_CFG(pem), pemx_cfg.u64);
3772 cfg031.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG031(pem));
3773 cfg031.s.mls = 2;
3774 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG031(pem), cfg031.u32);
3775 cfg040.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG040(pem));
3776 cfg040.s.tls = 3;
3777 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG040(pem), cfg040.u32);
3778
3779 /* Wait up to 10ms for the link speed change to complete */
3780 start_cycle = get_timer(0);
3781 do {
3782 if (get_timer(start_cycle) > 10)
3783 return;
3784
3785 mdelay(1);
3786 cfg032.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG032(pem));
3787 } while (cfg032.s.ls != 3);
3788
3789 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
3790 low_qlm = pem; /* FIXME */
3791 high_qlm = (pemx_cfg.cn78xx.lanes8) ? low_qlm + 1 : low_qlm;
3792
3793 /* Toggle cfg_rx_dll_locken_ovrrd_en and rx_resetn_ovrrd_en across
3794	 * all QLM lanes in use
3795 */
3796 for (qlm = low_qlm; qlm <= high_qlm; qlm++) {
3797 for (lane = 0; lane < 4; lane++) {
3798 cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
3799 cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;
3800
3801 misc_ovrrd.u64 =
3802 csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem));
3803 misc_ovrrd.s.cfg_rx_dll_locken_ovrrd_en = 1;
3804 csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem),
3805 misc_ovrrd.u64);
3806 pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem));
3807 pwr_ctrl.s.rx_resetn_ovrrd_en = 1;
3808 csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem), pwr_ctrl.u64);
3809 }
3810 }
3811 for (qlm = low_qlm; qlm <= high_qlm; qlm++) {
3812 for (lane = 0; lane < 4; lane++) {
3813 cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
3814 cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;
3815
3816 misc_ovrrd.u64 =
3817 csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem));
3818 misc_ovrrd.s.cfg_rx_dll_locken_ovrrd_en = 0;
3819 csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem),
3820 misc_ovrrd.u64);
3821 pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem));
3822 pwr_ctrl.s.rx_resetn_ovrrd_en = 0;
3823 csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem), pwr_ctrl.u64);
3824 }
3825 }
3826
3827 //printf("PCIe%d: Waiting for EP link up at Gen3\n", pem);
3828 if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_PEMX_ON(pem), cvmx_pemx_on_t, pemoor, ==, 1,
3829 1000000)) {
3830 printf("PCIe%d: Timeout waiting for EP link up at Gen3\n", pem);
3831 return;
3832 }
3833}
3834
3835static void __cvmx_qlm_pcie_errata_cn78xx(int node, int qlm)
3836{
3837 int pem, i, q;
3838 int is_8lanes;
3839 int is_high_lanes;
3840 int low_qlm, high_qlm, is_host;
3841 int need_ep_monitor;
3842 cvmx_pemx_cfg_t pem_cfg, pem3_cfg;
3843 cvmx_gserx_slice_cfg_t slice_cfg;
3844 cvmx_gserx_rx_pwr_ctrl_p1_t pwr_ctrl_p1;
3845 cvmx_rst_soft_prstx_t soft_prst;
3846
3847 /* Only applies to CN78XX pass 1.x */
3848 if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
3849 return;
3850
3851 /* Determine the PEM for this QLM, whether we're in 8 lane mode,
3852 * and whether these are the top lanes of the 8
3853 */
3854 switch (qlm) {
3855 case 0: /* First 4 lanes of PEM0 */
3856 pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
3857 pem = 0;
3858 is_8lanes = pem_cfg.cn78xx.lanes8;
3859 is_high_lanes = 0;
3860 break;
3861 case 1: /* Either last 4 lanes of PEM0, or PEM1 */
3862 pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
3863 pem = (pem_cfg.cn78xx.lanes8) ? 0 : 1;
3864 is_8lanes = pem_cfg.cn78xx.lanes8;
3865 is_high_lanes = is_8lanes;
3866 break;
3867 case 2: /* First 4 lanes of PEM2 */
3868 pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
3869 pem = 2;
3870 is_8lanes = pem_cfg.cn78xx.lanes8;
3871 is_high_lanes = 0;
3872 break;
3873 case 3: /* Either last 4 lanes of PEM2, or PEM3 */
3874 pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
3875 pem3_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
3876 pem = (pem_cfg.cn78xx.lanes8) ? 2 : 3;
3877 is_8lanes = (pem == 2) ? pem_cfg.cn78xx.lanes8 : pem3_cfg.cn78xx.lanes8;
3878 is_high_lanes = (pem == 2) && is_8lanes;
3879 break;
3880 case 4: /* Last 4 lanes of PEM3 */
3881 pem = 3;
3882 is_8lanes = 1;
3883 is_high_lanes = 1;
3884 break;
3885 default:
3886 return;
3887 }
3888
3889	/* These workarounds must be applied once per PEM. Since we're called per
3890	 * QLM, wait for the second half of an 8-lane setup before applying them
3891 */
3892 if (is_8lanes && !is_high_lanes)
3893 return;
3894
3895 pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
3896 is_host = pem_cfg.cn78xx.hostmd;
3897 low_qlm = (is_8lanes) ? qlm - 1 : qlm;
3898 high_qlm = qlm;
3899 qlm = -1;
3900
3901 if (!is_host) {
3902		/* Read the current slice config value. If it's already at the value we will
3903 * program then skip doing the workaround. We're probably doing a
3904 * hot reset and the workaround is already applied
3905 */
3906 slice_cfg.u64 = csr_rd_node(node, CVMX_GSERX_SLICE_CFG(low_qlm));
3907 if (slice_cfg.s.tx_rx_detect_lvl_enc == 7 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
3908 return;
3909 }
3910
3911 if (is_host && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
3912 /* (GSER-XXXX) GSER PHY needs to be reset at initialization */
3913 cvmx_gserx_phy_ctl_t phy_ctl;
3914
3915 for (q = low_qlm; q <= high_qlm; q++) {
3916 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(q));
3917 phy_ctl.s.phy_reset = 1;
3918 csr_wr_node(node, CVMX_GSERX_PHY_CTL(q), phy_ctl.u64);
3919 }
3920 udelay(5);
3921
3922 for (q = low_qlm; q <= high_qlm; q++) {
3923 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(q));
3924 phy_ctl.s.phy_reset = 0;
3925 csr_wr_node(node, CVMX_GSERX_PHY_CTL(q), phy_ctl.u64);
3926 }
3927 udelay(5);
3928 }
3929
3930 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
3931 /* (GSER-20936) GSER has wrong PCIe RX detect reset value */
3932 for (q = low_qlm; q <= high_qlm; q++) {
3933 slice_cfg.u64 = csr_rd_node(node, CVMX_GSERX_SLICE_CFG(q));
3934 slice_cfg.s.tx_rx_detect_lvl_enc = 7;
3935 csr_wr_node(node, CVMX_GSERX_SLICE_CFG(q), slice_cfg.u64);
3936 }
3937
3938 /* Clear the bit in GSERX_RX_PWR_CTRL_P1[p1_rx_subblk_pd]
3939		 * that corresponds to "Lane DLL"
3940 */
3941 for (q = low_qlm; q <= high_qlm; q++) {
3942 pwr_ctrl_p1.u64 = csr_rd_node(node, CVMX_GSERX_RX_PWR_CTRL_P1(q));
3943 pwr_ctrl_p1.s.p1_rx_subblk_pd &= ~4;
3944 csr_wr_node(node, CVMX_GSERX_RX_PWR_CTRL_P1(q), pwr_ctrl_p1.u64);
3945 }
3946
3947 /* Errata (GSER-20888) GSER incorrect synchronizers hurts PCIe
3948 * Override TX Power State machine TX reset control signal
3949 */
3950 for (q = low_qlm; q <= high_qlm; q++) {
3951 for (i = 0; i < 4; i++) {
3952 cvmx_gserx_lanex_tx_cfg_0_t tx_cfg;
3953 cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;
3954
3955 tx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(i, q));
3956 tx_cfg.s.tx_resetn_ovrrd_val = 1;
3957 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(i, q), tx_cfg.u64);
3958 pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(i, q));
3959 pwr_ctrl.s.tx_p2s_resetn_ovrrd_en = 1;
3960 csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(i, q), pwr_ctrl.u64);
3961 }
3962 }
3963 }
3964
3965 if (!is_host) {
3966 cvmx_pciercx_cfg089_t cfg089;
3967 cvmx_pciercx_cfg090_t cfg090;
3968 cvmx_pciercx_cfg091_t cfg091;
3969 cvmx_pciercx_cfg092_t cfg092;
3970 cvmx_pciercx_cfg548_t cfg548;
3971 cvmx_pciercx_cfg554_t cfg554;
3972
3973 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
3974 /* Errata (GSER-21178) PCIe gen3 doesn't work */
3975 /* The starting equalization hints are incorrect on CN78XX pass 1.x. Fix
3976 * them for the 8 possible lanes. It doesn't hurt to program them even
3977 * for lanes not in use
3978 */
3979 cfg089.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG089(pem));
3980 cfg089.s.l1urph = 2;
3981 cfg089.s.l1utp = 7;
3982 cfg089.s.l0urph = 2;
3983 cfg089.s.l0utp = 7;
3984 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG089(pem), cfg089.u32);
3985 cfg090.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG090(pem));
3986 cfg090.s.l3urph = 2;
3987 cfg090.s.l3utp = 7;
3988 cfg090.s.l2urph = 2;
3989 cfg090.s.l2utp = 7;
3990 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG090(pem), cfg090.u32);
3991 cfg091.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG091(pem));
3992 cfg091.s.l5urph = 2;
3993 cfg091.s.l5utp = 7;
3994 cfg091.s.l4urph = 2;
3995 cfg091.s.l4utp = 7;
3996 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG091(pem), cfg091.u32);
3997 cfg092.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG092(pem));
3998 cfg092.s.l7urph = 2;
3999 cfg092.s.l7utp = 7;
4000 cfg092.s.l6urph = 2;
4001 cfg092.s.l6utp = 7;
4002 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG092(pem), cfg092.u32);
4003 /* FIXME: Disable phase 2 and phase 3 equalization */
4004 cfg548.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG548(pem));
4005 cfg548.s.ep2p3d = 1;
4006 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG548(pem), cfg548.u32);
4007 }
4008 /* Errata (GSER-21331) GEN3 Equalization may fail */
4009 /* Disable preset #10 and disable the 2ms timeout */
4010 cfg554.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG554(pem));
4011 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
4012 cfg554.s.p23td = 1;
4013 cfg554.s.prv = 0x3ff;
4014 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG554(pem), cfg554.u32);
4015
4016 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
4017 need_ep_monitor = (pem_cfg.s.md == 2);
4018 if (need_ep_monitor) {
4019 cvmx_pciercx_cfg031_t cfg031;
4020 cvmx_pciercx_cfg040_t cfg040;
4021
4022 /* Force Gen1 for initial link bringup. We'll
4023 * fix it later
4024 */
4025 pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
4026 pem_cfg.s.md = 0;
4027 csr_wr_node(node, CVMX_PEMX_CFG(pem), pem_cfg.u64);
4028 cfg031.u32 = cvmx_pcie_cfgx_read_node(node, pem,
4029 CVMX_PCIERCX_CFG031(pem));
4030 cfg031.s.mls = 0;
4031 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG031(pem),
4032 cfg031.u32);
4033 cfg040.u32 = cvmx_pcie_cfgx_read_node(node, pem,
4034 CVMX_PCIERCX_CFG040(pem));
4035 cfg040.s.tls = 1;
4036 cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG040(pem),
4037 cfg040.u32);
4038 __cvmx_qlm_pcie_errata_ep_cn78xx(node, pem);
4039 }
4040 return;
4041 }
4042 }
4043
4044 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
4045 /* De-assert the SOFT_RST bit for this QLM (PEM), causing the PCIe
4046 * workarounds code above to take effect.
4047 */
4048 soft_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(pem));
4049 soft_prst.s.soft_prst = 0;
4050 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(pem), soft_prst.u64);
4051 udelay(1);
4052
4053 /* Assert the SOFT_RST bit for this QLM (PEM), putting the PCIe back into
4054 * reset state with disturbing the workarounds.
4055 */
4056 soft_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(pem));
4057 soft_prst.s.soft_prst = 1;
4058 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(pem), soft_prst.u64);
4059 }
4060 udelay(1);
4061}
4062
4063/**
4064 * Set up the PEM to either drive or receive reset from PERST based on RC or EP mode
4065 *
4066 * @param node   Node to use in a NUMA setup
4067 * @param pem    Which PEM to set up
4068 * @param is_endpoint
4069 *               Non-zero if the PEM is an EP
4070 */
4071static void __setup_pem_reset(int node, int pem, int is_endpoint)
4072{
4073 cvmx_rst_ctlx_t rst_ctl;
4074
4075 /* Make sure is_endpoint is either 0 or 1 */
4076 is_endpoint = (is_endpoint != 0);
4077 rst_ctl.u64 = csr_rd_node(node, CVMX_RST_CTLX(pem));
4078 rst_ctl.s.prst_link = 0; /* Link down causes soft reset */
4079 rst_ctl.s.rst_link = is_endpoint; /* EP PERST causes a soft reset */
4080 rst_ctl.s.rst_drv = !is_endpoint; /* Drive if RC */
4081 rst_ctl.s.rst_rcv = is_endpoint; /* Only read PERST in EP mode */
4082 rst_ctl.s.rst_chip = 0; /* PERST doesn't pull CHIP_RESET */
4083 csr_wr_node(node, CVMX_RST_CTLX(pem), rst_ctl.u64);
4084}
4085
4086/**
4087 * Configure QLM speed and mode for cn78xx.
4088 *
4089 * @param node Node to configure the QLM
4090 * @param qlm The QLM to configure
4091 * @param baud_mhz   The speed the QLM needs to be configured, in MHz.
4092 * @param mode The QLM to be configured as SGMII/XAUI/PCIe.
4093 * @param rc Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
4094 * @param gen3 Only used for PCIe
4095 * gen3 = 2 GEN3 mode
4096 * gen3 = 1 GEN2 mode
4097 * gen3 = 0 GEN1 mode
4098 *
4099 * @param ref_clk_sel The reference-clock selection to use to configure QLM
4100 * 0 = REF_100MHZ
4101 * 1 = REF_125MHZ
4102 * 2 = REF_156MHZ
4103 * 3 = REF_161MHZ
4104 * @param ref_clk_input The reference-clock input to use to configure QLM
4105 *
4106 * Return: 0 on success or -1 on failure.
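 *
 * Example (hypothetical values): bring up QLM0 on node 0 as a PCIe Gen3
 * x4 root complex clocked from common reference clock 0:
 *
 *   octeon_configure_qlm_cn78xx(0, 0, 8000, CVMX_QLM_MODE_PCIE, 1, 2, 0, 1);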
4107 */
4108int octeon_configure_qlm_cn78xx(int node, int qlm, int baud_mhz, int mode, int rc, int gen3,
4109 int ref_clk_sel, int ref_clk_input)
4110{
4111 cvmx_gserx_phy_ctl_t phy_ctl;
4112 cvmx_gserx_lane_mode_t lmode;
4113 cvmx_gserx_cfg_t cfg;
4114 cvmx_gserx_refclk_sel_t refclk_sel;
4115
4116 int is_pcie = 0;
4117 int is_ilk = 0;
4118 int is_bgx = 0;
4119 int lane_mode = 0;
4120 int lmac_type = 0;
4121 bool alt_pll = false;
4122 int num_ports = 0;
4123 int lane_to_sds = 0;
4124
4125	debug("%s(node: %d, qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d)\n",
4126 __func__, node, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
4127 if (OCTEON_IS_MODEL(OCTEON_CN76XX) && qlm > 4) {
4128 debug("%s: qlm %d not present on CN76XX\n", __func__, qlm);
4129 return -1;
4130 }
4131
4132 /* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
4133 * during speed change. Change SLI_WINDOW_CTL[time] to 525us
4134 */
4135 __set_sli_window_ctl_errata_31375(node);
4136
4137 cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
4138 /* If PEM is in EP, no need to do anything */
4139
4140 if (cfg.s.pcie && rc == 0) {
4141 debug("%s: node %d, qlm %d is in PCIe endpoint mode, returning\n",
4142 __func__, node, qlm);
4143 return 0;
4144 }
4145
4146 /* Set the reference clock to use */
4147 refclk_sel.u64 = 0;
4148 if (ref_clk_input == 0) { /* External ref clock */
4149 refclk_sel.s.com_clk_sel = 0;
4150 refclk_sel.s.use_com1 = 0;
4151 } else if (ref_clk_input == 1) {
4152 refclk_sel.s.com_clk_sel = 1;
4153 refclk_sel.s.use_com1 = 0;
4154 } else {
4155 refclk_sel.s.com_clk_sel = 1;
4156 refclk_sel.s.use_com1 = 1;
4157 }
4158
4159 csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4160
4161 /* Reset the QLM after changing the reference clock */
4162 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
4163 phy_ctl.s.phy_reset = 1;
4164 phy_ctl.s.phy_pd = 1;
4165 csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
4166
4167 udelay(1000);
4168
4169 /* Always restore the reference clocks for a QLM */
4170 memcpy(ref_clk_cn78xx[node][qlm], def_ref_clk_cn78xx, sizeof(def_ref_clk_cn78xx));
4171 switch (mode) {
4172 case CVMX_QLM_MODE_PCIE:
4173 case CVMX_QLM_MODE_PCIE_1X8: {
4174 cvmx_pemx_cfg_t pemx_cfg;
4175 cvmx_pemx_on_t pemx_on;
4176
4177 is_pcie = 1;
4178
4179 if (ref_clk_sel == 0) {
4180 refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
4181 refclk_sel.s.pcie_refclk125 = 0;
4182 csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4183 if (gen3 == 0) /* Gen1 mode */
4184 lane_mode = R_2_5G_REFCLK100;
4185 else if (gen3 == 1) /* Gen2 mode */
4186 lane_mode = R_5G_REFCLK100;
4187 else
4188 lane_mode = R_8G_REFCLK100;
4189 } else if (ref_clk_sel == 1) {
4190 refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
4191 refclk_sel.s.pcie_refclk125 = 1;
4192 csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4193 if (gen3 == 0) /* Gen1 mode */
4194 lane_mode = R_2_5G_REFCLK125;
4195 else if (gen3 == 1) /* Gen2 mode */
4196 lane_mode = R_5G_REFCLK125;
4197 else
4198 lane_mode = R_8G_REFCLK125;
4199 } else {
4200 printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
4201 return -1;
4202 }
4203
4204 switch (qlm) {
4205 case 0: /* Either x4 or x8 based on PEM0 */
4206 {
4207 cvmx_rst_soft_prstx_t rst_prst;
4208
4209 rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(0));
4210 rst_prst.s.soft_prst = rc;
4211 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
4212 __setup_pem_reset(node, 0, !rc);
4213
4214 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
4215 pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
4216 pemx_cfg.cn78xx.hostmd = rc;
4217 pemx_cfg.cn78xx.md = gen3;
4218 csr_wr_node(node, CVMX_PEMX_CFG(0), pemx_cfg.u64);
4219 /* x8 mode waits for QLM1 setup before turning on the PEM */
4220 if (mode == CVMX_QLM_MODE_PCIE) {
4221 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
4222 pemx_on.s.pemon = 1;
4223 csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
4224 }
4225 break;
4226 }
4227 case 1: /* Either PEM0 x8 or PEM1 x4 */
4228 {
4229 if (mode == CVMX_QLM_MODE_PCIE) {
4230 cvmx_rst_soft_prstx_t rst_prst;
4231 cvmx_pemx_cfg_t pemx_cfg;
4232
4233 rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(1));
4234 rst_prst.s.soft_prst = rc;
4235 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
4236 __setup_pem_reset(node, 1, !rc);
4237
4238 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(1));
4239 pemx_cfg.cn78xx.lanes8 = 0;
4240 pemx_cfg.cn78xx.hostmd = rc;
4241 pemx_cfg.cn78xx.md = gen3;
4242 csr_wr_node(node, CVMX_PEMX_CFG(1), pemx_cfg.u64);
4243
4244 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(1));
4245 pemx_on.s.pemon = 1;
4246 csr_wr_node(node, CVMX_PEMX_ON(1), pemx_on.u64);
4247 } else {
4248 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
4249 pemx_on.s.pemon = 1;
4250 csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
4251 }
4252 break;
4253 }
4254 case 2: /* Either PEM2 x4 or PEM2 x8 */
4255 {
4256 cvmx_rst_soft_prstx_t rst_prst;
4257
4258 rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(2));
4259 rst_prst.s.soft_prst = rc;
4260 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
4261 __setup_pem_reset(node, 2, !rc);
4262
4263 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
4264 pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
4265 pemx_cfg.cn78xx.hostmd = rc;
4266 pemx_cfg.cn78xx.md = gen3;
4267 csr_wr_node(node, CVMX_PEMX_CFG(2), pemx_cfg.u64);
4268 /* x8 mode waits for QLM3 setup before turning on the PEM */
4269 if (mode == CVMX_QLM_MODE_PCIE) {
4270 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
4271 pemx_on.s.pemon = 1;
4272 csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
4273 }
4274 break;
4275 }
4276 case 3: /* Either PEM2 x8 or PEM3 x4 */
4277 {
4278 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
4279 if (pemx_cfg.cn78xx.lanes8) {
4280 /* Last 4 lanes of PEM2 */
4281 /* PEMX_CFG already setup */
4282 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
4283 pemx_on.s.pemon = 1;
4284 csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
4285 }
4286 /* Check if PEM3 uses QLM3 and in x4 lane mode */
4287 if (mode == CVMX_QLM_MODE_PCIE) {
4288 cvmx_rst_soft_prstx_t rst_prst;
4289
4290 rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
4291 rst_prst.s.soft_prst = rc;
4292 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
4293 __setup_pem_reset(node, 3, !rc);
4294
4295 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
4296 pemx_cfg.cn78xx.lanes8 = 0;
4297 pemx_cfg.cn78xx.hostmd = rc;
4298 pemx_cfg.cn78xx.md = gen3;
4299 csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);
4300
4301 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
4302 pemx_on.s.pemon = 1;
4303 csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
4304 }
4305 break;
4306 }
4307 case 4: /* Either PEM3 x4 or PEM3 x8 */
4308 {
4309 if (mode == CVMX_QLM_MODE_PCIE_1X8) {
4310 /* Last 4 lanes of PEM3 */
4311 /* PEMX_CFG already setup */
4312 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
4313 pemx_on.s.pemon = 1;
4314 csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
4315 } else {
4316 /* 4 lanes of PEM3 */
4317 cvmx_pemx_qlm_t pemx_qlm;
4318 cvmx_rst_soft_prstx_t rst_prst;
4319
4320 rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
4321 rst_prst.s.soft_prst = rc;
4322 csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
4323 __setup_pem_reset(node, 3, !rc);
4324
4325 pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
4326 pemx_cfg.cn78xx.lanes8 = 0;
4327 pemx_cfg.cn78xx.hostmd = rc;
4328 pemx_cfg.cn78xx.md = gen3;
4329 csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);
4330 /* PEM3 is on QLM4 */
4331 pemx_qlm.u64 = csr_rd_node(node, CVMX_PEMX_QLM(3));
4332 pemx_qlm.cn78xx.pem3qlm = 1;
4333 csr_wr_node(node, CVMX_PEMX_QLM(3), pemx_qlm.u64);
4334 pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
4335 pemx_on.s.pemon = 1;
4336 csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
4337 }
4338 break;
4339 }
4340 default:
4341 break;
4342 }
4343 break;
4344 }
4345 case CVMX_QLM_MODE_ILK:
4346 is_ilk = 1;
4347 lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
4348 if (lane_mode == -1)
4349 return -1;
4350 /* FIXME: Set lane_mode for other speeds */
4351 break;
4352 case CVMX_QLM_MODE_SGMII:
4353 is_bgx = 1;
4354 lmac_type = 0;
4355 lane_to_sds = 1;
4356 num_ports = 4;
4357 lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
4358 debug("%s: SGMII lane mode: %d, alternate PLL: %s\n", __func__, lane_mode,
4359 alt_pll ? "true" : "false");
4360 if (lane_mode == -1)
4361 return -1;
4362 break;
4363 case CVMX_QLM_MODE_XAUI:
4364 is_bgx = 5;
4365 lmac_type = 1;
4366 lane_to_sds = 0xe4;
4367 num_ports = 1;
4368 lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
4369 debug("%s: XAUI lane mode: %d\n", __func__, lane_mode);
4370 if (lane_mode == -1)
4371 return -1;
4372 break;
4373 case CVMX_QLM_MODE_RXAUI:
4374 is_bgx = 3;
4375 lmac_type = 2;
4376 lane_to_sds = 0;
4377 num_ports = 2;
4378		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
4379		debug("%s: RXAUI lane mode: %d\n", __func__, lane_mode);
4380 if (lane_mode == -1)
4381 return -1;
4382 break;
4383 case CVMX_QLM_MODE_XFI: /* 10GR_4X1 */
4384 case CVMX_QLM_MODE_10G_KR:
4385 is_bgx = 1;
4386 lmac_type = 3;
4387 lane_to_sds = 1;
4388 num_ports = 4;
4389 lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
4390 debug("%s: XFI/10G_KR lane mode: %d\n", __func__, lane_mode);
4391 if (lane_mode == -1)
4392 return -1;
4393 break;
4394 case CVMX_QLM_MODE_XLAUI: /* 40GR4_1X4 */
4395 case CVMX_QLM_MODE_40G_KR4:
4396 is_bgx = 5;
4397 lmac_type = 4;
4398 lane_to_sds = 0xe4;
4399 num_ports = 1;
4400 lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
4401 debug("%s: XLAUI/40G_KR4 lane mode: %d\n", __func__, lane_mode);
4402 if (lane_mode == -1)
4403 return -1;
4404 break;
4405 case CVMX_QLM_MODE_DISABLED:
4406 /* Power down the QLM */
4407 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
4408 phy_ctl.s.phy_pd = 1;
4409 phy_ctl.s.phy_reset = 1;
4410 csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
4411 /* Disable all modes */
4412 csr_wr_node(node, CVMX_GSERX_CFG(qlm), 0);
4413	/* Nothing else to do for a disabled QLM */
4414 return 0;
4415 default:
4416 break;
4417 }
4418
4419 if (alt_pll) {
4420 debug("%s: alternate PLL settings used for node %d, qlm %d, lane mode %d, reference clock %d\n",
4421 __func__, node, qlm, lane_mode, ref_clk_sel);
4422 if (__set_qlm_ref_clk_cn78xx(node, qlm, lane_mode, ref_clk_sel)) {
4423 printf("%s: Error: reference clock %d is not supported for node %d, qlm %d\n",
4424 __func__, ref_clk_sel, node, qlm);
4425 return -1;
4426 }
4427 }
4428
4429 /* Power up PHY, but keep it in reset */
4430 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
4431 phy_ctl.s.phy_pd = 0;
4432 phy_ctl.s.phy_reset = 1;
4433 csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
4434
4435 /* Errata GSER-20788: GSER(0..13)_CFG[BGX_QUAD]=1 is broken. Force the
4436 * BGX_QUAD bit to be clear for CN78XX pass 1.x
4437 */
4438 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
4439 is_bgx &= 3;
4440
4441 /* Set GSER for the interface mode */
4442 cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
4443 cfg.s.ila = is_ilk;
4444 cfg.s.bgx = is_bgx & 1;
4445 cfg.s.bgx_quad = (is_bgx >> 2) & 1;
4446 cfg.s.bgx_dual = (is_bgx >> 1) & 1;
4447 cfg.s.pcie = is_pcie;
4448 csr_wr_node(node, CVMX_GSERX_CFG(qlm), cfg.u64);
4449
4450 /* Lane mode */
4451 lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
4452 lmode.s.lmode = lane_mode;
4453 csr_wr_node(node, CVMX_GSERX_LANE_MODE(qlm), lmode.u64);
4454
4455 /* BGX0-1 can connect to QLM0-1 or QLM 2-3. Program the select bit if we're
4456 * one of these QLMs and we're using BGX
4457 */
4458 if (qlm < 4 && is_bgx) {
4459 int bgx = qlm & 1;
4460 int use_upper = (qlm >> 1) & 1;
4461 cvmx_bgxx_cmr_global_config_t global_cfg;
4462
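		/* qlm & 1 picks the BGX; qlm >> 1 selects the upper QLM pair:
		 * QLM0/1 feed the BGX0/1 lower inputs, QLM2/3 the upper inputs
		 */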
4463 global_cfg.u64 = csr_rd_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
4464 global_cfg.s.pmux_sds_sel = use_upper;
4465 csr_wr_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_cfg.u64);
4466 }
4467
4468 /* Bring phy out of reset */
4469 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
4470 phy_ctl.s.phy_reset = 0;
4471 csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
4472 csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
4473
4474 /*
4475 * Wait 250 ns until the management interface is ready to accept
4476 * read/write commands.
4477 */
4478 udelay(1);
4479
4480 if (is_bgx) {
4481 int bgx = (qlm < 2) ? qlm : qlm - 2;
4482 cvmx_bgxx_cmrx_config_t cmr_config;
4483 int index;
4484
4485 for (index = 0; index < num_ports; index++) {
4486 cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx));
4487 cmr_config.s.enable = 0;
4488 cmr_config.s.data_pkt_tx_en = 0;
4489 cmr_config.s.data_pkt_rx_en = 0;
4490 cmr_config.s.lmac_type = lmac_type;
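			/* lane_to_sds encoding: 1 = one lane per LMAC (use index);
			 * 0 = RXAUI lane pairing (0x4 for LMAC0, 0xe for LMAC1);
			 * anything else is a literal lane map (e.g. 0xe4)
			 */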
4491 cmr_config.s.lane_to_sds = ((lane_to_sds == 1) ?
4492 index : ((lane_to_sds == 0) ?
4493 (index ? 0xe : 4) :
4494 lane_to_sds));
4495 csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
4496 }
4497 csr_wr_node(node, CVMX_BGXX_CMR_TX_LMACS(bgx), num_ports);
4498 csr_wr_node(node, CVMX_BGXX_CMR_RX_LMACS(bgx), num_ports);
4499
4500 /* Enable/disable training for 10G_KR/40G_KR4/XFI/XLAUI modes */
4501 for (index = 0; index < num_ports; index++) {
4502 cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
4503
4504 spu_pmd_control.u64 =
4505 csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
4506
4507 if (mode == CVMX_QLM_MODE_10G_KR || mode == CVMX_QLM_MODE_40G_KR4)
4508 spu_pmd_control.s.train_en = 1;
4509 else if (mode == CVMX_QLM_MODE_XFI || mode == CVMX_QLM_MODE_XLAUI)
4510 spu_pmd_control.s.train_en = 0;
4511
4512 csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
4513 spu_pmd_control.u64);
4514 }
4515 }
4516
4517 /* Configure the gser pll */
4518 if (!is_pcie)
4519 __qlm_setup_pll_cn78xx(node, qlm);
4520
4521 /* Wait for reset to complete and the PLL to lock */
4522 if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm),
4523 cvmx_gserx_pll_stat_t,
4524 pll_lock, ==, 1, 10000)) {
4525 printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n",
4526 node, qlm);
4527 return -1;
4528 }
4529
4530 /* Perform PCIe errata workaround */
4531 if (is_pcie)
4532 __cvmx_qlm_pcie_errata_cn78xx(node, qlm);
4533 else
4534 __qlm_init_errata_20844(node, qlm);
4535
4536 /* Wait for reset to complete and the PLL to lock */
4537 /* PCIe mode doesn't become ready until the PEM block attempts to bring
4538 * the interface up. Skip this check for PCIe
4539 */
4540 if (!is_pcie && CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm),
4541 cvmx_gserx_qlm_stat_t, rst_rdy,
4542 ==, 1, 10000)) {
4543 printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n",
4544 node, qlm);
4545 return -1;
4546 }
4547
4548 /* Errata GSER-26150: 10G PHY PLL Temperature Failure */
4549 /* This workaround must be completed after the final deassertion of
4550 * GSERx_PHY_CTL[PHY_RESET].
4551 * Apply the workaround to 10.3125Gbps and 8Gbps only.
4552 */
4553 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
4554 (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
4555 __qlm_errata_gser_26150(0, qlm, is_pcie);
4556
4557 /* Errata GSER-26636: 10G-KR/40G-KR - Inverted Tx Coefficient Direction
4558 * Change. Applied to all 10G standards (required for KR) but also
4559 * applied to other standards in case software training is used
4560 */
4561 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && baud_mhz == 103125)
4562 __qlm_kr_inc_dec_gser26636(node, qlm);
4563
4564 /* Errata GSER-25992: RX EQ Default Settings Update (CTLE Bias) */
4565 /* This workaround will only be applied to Pass 1.x */
4566 /* It will also only be applied if the SERDES data-rate is 10G */
4567 /* or if PCIe Gen3 (gen3=2 is PCIe Gen3) */
4568 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
4569 (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
4570 cvmx_qlm_gser_errata_25992(node, qlm);
4571
4572 /* Errata GSER-27140: Updating the RX EQ settings due to temperature
4573 * drift sensitivities
4574 */
4575 /* This workaround will also only be applied if the SERDES data-rate is 10G */
4576 if (baud_mhz == 103125)
4577 __qlm_rx_eq_temp_gser27140(node, qlm);
4578
4579 /* Reduce the voltage amplitude coming from Marvell PHY and also change
4580 * DFE threshold settings for RXAUI interface
4581 */
4582 if (is_bgx && mode == CVMX_QLM_MODE_RXAUI) {
4583 int l;
4584
4585 for (l = 0; l < 4; l++) {
4586 cvmx_gserx_lanex_rx_cfg_4_t cfg4;
4587 cvmx_gserx_lanex_tx_cfg_0_t cfg0;
4588 /* Change the Q/QB error sampler 0 threshold from 0xD to 0xF */
4589 cfg4.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
4590 cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
4591 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
4592 /* Reduce the voltage swing to roughly 460mV */
4593 cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
4594 cfg0.s.cfg_tx_swing = 0x12;
4595 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
4596 }
4597 }
4598
4599 return 0;
4600}
4601
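/* Despite the name, this returns 0 when the QLM/DLM can carry BGX traffic
 * (QLM2/3 and DLM5/6 on CN73XX) and 1 otherwise.
 */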
4602static int __is_qlm_valid_bgx_cn73xx(int qlm)
4603{
4604 if (qlm == 2 || qlm == 3 || qlm == 5 || qlm == 6)
4605 return 0;
4606 return 1;
4607}
4608
4609/**
4610 * Configure QLM/DLM speed and mode for cn73xx.
4611 *
4612 * @param qlm The QLM to configure
4613 * @param baud_mhz   The speed the QLM needs to be configured, in MHz.
4614 * @param mode The QLM to be configured as SGMII/XAUI/PCIe.
4615 * @param rc Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
4616 * @param gen3 Only used for PCIe
4617 * gen3 = 2 GEN3 mode
4618 * gen3 = 1 GEN2 mode
4619 * gen3 = 0 GEN1 mode
4620 *
4621 * @param ref_clk_sel The reference-clock selection to use to configure QLM
4622 * 0 = REF_100MHZ
4623 * 1 = REF_125MHZ
4624 * 2 = REF_156MHZ
4625 * 3 = REF_161MHZ
4626 *
4627 * @param ref_clk_input The reference-clock input to use to configure QLM
4628 * 0 = QLM/DLM reference clock input
4629 * 1 = common reference clock input 0
4630 * 2 = common reference clock input 1
4631 *
4632 * Return: 0 on success or -1 on failure.
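 *
 * Example (hypothetical values): configure DLM5 as one RXAUI interface
 * at 6.25 Gbaud from common reference clock 0 (156.25 MHz):
 *
 *   octeon_configure_qlm_cn73xx(5, 6250, CVMX_QLM_MODE_RXAUI_1X2, 0, 0, 2, 1);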
4633 */
4634static int octeon_configure_qlm_cn73xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
4635 int ref_clk_sel, int ref_clk_input)
4636{
4637 cvmx_gserx_phy_ctl_t phy_ctl;
4638 cvmx_gserx_lane_mode_t lmode;
4639 cvmx_gserx_cfg_t cfg;
4640 cvmx_gserx_refclk_sel_t refclk_sel;
4641 int is_pcie = 0;
4642 int is_bgx = 0;
4643 int lane_mode = 0;
4644 short lmac_type[4] = { 0 };
4645 short sds_lane[4] = { 0 };
4646 bool alt_pll = false;
4647 int enable_training = 0;
4648 int additional_lmacs = 0;
4649
4650	debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d)\n",
4651 __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
4652
4653 /* Don't configure QLM4 if it is not in SATA mode */
4654 if (qlm == 4) {
4655 if (mode == CVMX_QLM_MODE_SATA_2X1)
4656 return __setup_sata(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
4657
4658 printf("Invalid mode for QLM4\n");
4659 return 0;
4660 }
4661
4662 cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
4663
4664 /* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
4665 * during speed change. Change SLI_WINDOW_CTL[time] to 525us
4666 */
4667 __set_sli_window_ctl_errata_31375(0);
4668 /* If PEM is in EP, no need to do anything */
4669 if (cfg.s.pcie && rc == 0 &&
4670 (mode == CVMX_QLM_MODE_PCIE || mode == CVMX_QLM_MODE_PCIE_1X8 ||
4671 mode == CVMX_QLM_MODE_PCIE_1X2)) {
4672 debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
4673 return 0;
4674 }
4675
4676 /* Set the reference clock to use */
4677 refclk_sel.u64 = 0;
4678 if (ref_clk_input == 0) { /* External ref clock */
4679 refclk_sel.s.com_clk_sel = 0;
4680 refclk_sel.s.use_com1 = 0;
4681 } else if (ref_clk_input == 1) {
4682 refclk_sel.s.com_clk_sel = 1;
4683 refclk_sel.s.use_com1 = 0;
4684 } else {
4685 refclk_sel.s.com_clk_sel = 1;
4686 refclk_sel.s.use_com1 = 1;
4687 }
4688
4689 csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4690
4691 /* Reset the QLM after changing the reference clock */
4692 phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
4693 phy_ctl.s.phy_reset = 1;
4694 phy_ctl.s.phy_pd = 1;
4695 csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
4696
4697 udelay(1000);
4698
4699 /* Check if QLM is a valid BGX interface */
4700 if (mode != CVMX_QLM_MODE_PCIE && mode != CVMX_QLM_MODE_PCIE_1X2 &&
4701 mode != CVMX_QLM_MODE_PCIE_1X8) {
4702 if (__is_qlm_valid_bgx_cn73xx(qlm))
4703 return -1;
4704 }
4705
4706 switch (mode) {
4707 case CVMX_QLM_MODE_PCIE:
4708 case CVMX_QLM_MODE_PCIE_1X2:
4709 case CVMX_QLM_MODE_PCIE_1X8: {
4710 cvmx_pemx_cfg_t pemx_cfg;
4711 cvmx_pemx_on_t pemx_on;
4712 cvmx_pemx_qlm_t pemx_qlm;
4713 cvmx_rst_soft_prstx_t rst_prst;
4714 int port = 0;
4715
4716 is_pcie = 1;
4717
4718 if (qlm < 5 && mode == CVMX_QLM_MODE_PCIE_1X2) {
4719 printf("Invalid PCIe mode(%d) for QLM%d\n", mode, qlm);
4720 return -1;
4721 }
4722
4723 if (ref_clk_sel == 0) {
4724 refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
4725 refclk_sel.s.pcie_refclk125 = 0;
4726 csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4727 if (gen3 == 0) /* Gen1 mode */
4728 lane_mode = R_2_5G_REFCLK100;
4729 else if (gen3 == 1) /* Gen2 mode */
4730 lane_mode = R_5G_REFCLK100;
4731 else
4732 lane_mode = R_8G_REFCLK100;
4733 } else if (ref_clk_sel == 1) {
4734 refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
4735 refclk_sel.s.pcie_refclk125 = 1;
4736 csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4737 if (gen3 == 0) /* Gen1 mode */
4738 lane_mode = R_2_5G_REFCLK125;
4739 else if (gen3 == 1) /* Gen2 mode */
4740 lane_mode = R_5G_REFCLK125;
4741 else
4742 lane_mode = R_8G_REFCLK125;
4743 } else {
4744 printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
4745 return -1;
4746 }
4747
4748 switch (qlm) {
4749 case 0: /* Either x4 or x8 based on PEM0 */
4750 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
4751 rst_prst.s.soft_prst = rc;
4752 csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
4753 __setup_pem_reset(0, 0, !rc);
4754
4755 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
4756 pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
4757 pemx_cfg.cn78xx.hostmd = rc;
4758 pemx_cfg.cn78xx.md = gen3;
4759 csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
4760 /* x8 mode waits for QLM1 setup before turning on the PEM */
4761 if (mode == CVMX_QLM_MODE_PCIE) {
4762 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
4763 pemx_on.s.pemon = 1;
4764 csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
4765 }
4766 break;
4767 case 1: /* Either PEM0 x8 or PEM1 x4 */
4768 if (mode == CVMX_QLM_MODE_PCIE) {
4769 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
4770 rst_prst.s.soft_prst = rc;
4771 csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
4772 __setup_pem_reset(0, 1, !rc);
4773
4774 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
4775 pemx_cfg.cn78xx.lanes8 = 0;
4776 pemx_cfg.cn78xx.hostmd = rc;
4777 pemx_cfg.cn78xx.md = gen3;
4778 csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
4779
4780 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
4781 pemx_on.s.pemon = 1;
4782 csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
4783 } else { /* x8 mode */
4784 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
4785 pemx_on.s.pemon = 1;
4786 csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
4787 }
4788 break;
4789 case 2: /* Either PEM2 x4 or PEM2 x8 or BGX0 */
4790 {
4791 pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(2));
4792 pemx_qlm.cn73xx.pemdlmsel = 0;
4793 csr_wr(CVMX_PEMX_QLM(2), pemx_qlm.u64);
4794
4795 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(2));
4796 rst_prst.s.soft_prst = rc;
4797 csr_wr(CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
4798 __setup_pem_reset(0, 2, !rc);
4799
4800 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
4801 pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
4802 pemx_cfg.cn78xx.hostmd = rc;
4803 pemx_cfg.cn78xx.md = gen3;
4804 csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);
4805 /* x8 mode waits for QLM3 setup before turning on the PEM */
4806 if (mode == CVMX_QLM_MODE_PCIE) {
4807 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
4808 pemx_on.s.pemon = 1;
4809 csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
4810 }
4811 break;
4812 }
4813 case 3: /* Either PEM2 x8 or PEM3 x4 or BGX1 */
4814 /* PEM2/PEM3 are configured to use QLM2/3 */
4815 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
4816 if (pemx_cfg.cn78xx.lanes8) {
4817 /* Last 4 lanes of PEM2 */
4818 /* PEMX_CFG already setup */
4819 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
4820 pemx_on.s.pemon = 1;
4821 csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
4822 }
4823 /* Check if PEM3 uses QLM3 and in x4 lane mode */
4824 if (mode == CVMX_QLM_MODE_PCIE) {
4825 pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(3));
4826 pemx_qlm.cn73xx.pemdlmsel = 0;
4827 csr_wr(CVMX_PEMX_QLM(3), pemx_qlm.u64);
4828
4829 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(3));
4830 rst_prst.s.soft_prst = rc;
4831 csr_wr(CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
4832 __setup_pem_reset(0, 3, !rc);
4833
4834 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(3));
4835 pemx_cfg.cn78xx.lanes8 = 0;
4836 pemx_cfg.cn78xx.hostmd = rc;
4837 pemx_cfg.cn78xx.md = gen3;
4838 csr_wr(CVMX_PEMX_CFG(3), pemx_cfg.u64);
4839
4840 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(3));
4841 pemx_on.s.pemon = 1;
4842 csr_wr(CVMX_PEMX_ON(3), pemx_on.u64);
4843 }
4844 break;
4845 case 5: /* PEM2/PEM3 x2 or BGX2 */
4846 case 6:
4847 port = (qlm == 5) ? 2 : 3;
4848 if (mode == CVMX_QLM_MODE_PCIE_1X2) {
4849 /* PEM2/PEM3 are configured to use DLM5/6 */
4850 pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(port));
4851 pemx_qlm.cn73xx.pemdlmsel = 1;
4852 csr_wr(CVMX_PEMX_QLM(port), pemx_qlm.u64);
4853			/* 2 lanes of PEM2 or PEM3 */
4854 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(port));
4855 rst_prst.s.soft_prst = rc;
4856 csr_wr(CVMX_RST_SOFT_PRSTX(port), rst_prst.u64);
4857 __setup_pem_reset(0, port, !rc);
4858
4859 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(port));
4860 pemx_cfg.cn78xx.lanes8 = 0;
4861 pemx_cfg.cn78xx.hostmd = rc;
4862 pemx_cfg.cn78xx.md = gen3;
4863 csr_wr(CVMX_PEMX_CFG(port), pemx_cfg.u64);
4864
4865 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(port));
4866 pemx_on.s.pemon = 1;
4867 csr_wr(CVMX_PEMX_ON(port), pemx_on.u64);
4868 }
4869 break;
4870 default:
4871 break;
4872 }
4873 break;
4874 }
4875 case CVMX_QLM_MODE_SGMII:
4876 is_bgx = 1;
4877 lmac_type[0] = 0;
4878 lmac_type[1] = 0;
4879 lmac_type[2] = 0;
4880 lmac_type[3] = 0;
4881 sds_lane[0] = 0;
4882 sds_lane[1] = 1;
4883 sds_lane[2] = 2;
4884 sds_lane[3] = 3;
4885 break;
4886 case CVMX_QLM_MODE_SGMII_2X1:
4887 if (qlm == 5) {
4888 is_bgx = 1;
4889 lmac_type[0] = 0;
4890 lmac_type[1] = 0;
4891 lmac_type[2] = -1;
4892 lmac_type[3] = -1;
4893 sds_lane[0] = 0;
4894 sds_lane[1] = 1;
4895 } else if (qlm == 6) {
4896 is_bgx = 1;
4897 lmac_type[0] = -1;
4898 lmac_type[1] = -1;
4899 lmac_type[2] = 0;
4900 lmac_type[3] = 0;
4901 sds_lane[2] = 2;
4902 sds_lane[3] = 3;
4903 additional_lmacs = 2;
4904 }
4905 break;
4906 case CVMX_QLM_MODE_XAUI:
4907 is_bgx = 5;
4908 lmac_type[0] = 1;
4909 lmac_type[1] = -1;
4910 lmac_type[2] = -1;
4911 lmac_type[3] = -1;
4912 sds_lane[0] = 0xe4;
4913 break;
4914 case CVMX_QLM_MODE_RXAUI:
4915 is_bgx = 3;
4916 lmac_type[0] = 2;
4917 lmac_type[1] = 2;
4918 lmac_type[2] = -1;
4919 lmac_type[3] = -1;
4920 sds_lane[0] = 0x4;
4921 sds_lane[1] = 0xe;
4922 break;
4923 case CVMX_QLM_MODE_RXAUI_1X2:
4924 if (qlm == 5) {
4925 is_bgx = 3;
4926 lmac_type[0] = 2;
4927 lmac_type[1] = -1;
4928 lmac_type[2] = -1;
4929 lmac_type[3] = -1;
4930 sds_lane[0] = 0x4;
4931 }
4932 if (qlm == 6) {
4933 is_bgx = 3;
4934 lmac_type[0] = -1;
4935 lmac_type[1] = -1;
4936 lmac_type[2] = 2;
4937 lmac_type[3] = -1;
4938 sds_lane[2] = 0xe;
4939 additional_lmacs = 2;
4940 }
4941 break;
4942 case CVMX_QLM_MODE_10G_KR:
4943 enable_training = 1;
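		/* Fall through: 10G-KR reuses the XFI lane configuration, with training enabled */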
4944 case CVMX_QLM_MODE_XFI: /* 10GR_4X1 */
4945 is_bgx = 1;
4946 lmac_type[0] = 3;
4947 lmac_type[1] = 3;
4948 lmac_type[2] = 3;
4949 lmac_type[3] = 3;
4950 sds_lane[0] = 0;
4951 sds_lane[1] = 1;
4952 sds_lane[2] = 2;
4953 sds_lane[3] = 3;
4954 break;
4955 case CVMX_QLM_MODE_10G_KR_1X2:
4956 enable_training = 1;
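		/* Fall through */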
	case CVMX_QLM_MODE_XFI_1X2:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = 3;
			lmac_type[1] = 3;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 6) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 3;
			lmac_type[3] = 3;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_40G_KR4:
		enable_training = 1;
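		/* fallthrough */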
	case CVMX_QLM_MODE_XLAUI: /* 40GR4_1X4 */
		is_bgx = 5;
		lmac_type[0] = 4;
		lmac_type[1] = -1;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0xe4;
		break;
	case CVMX_QLM_MODE_RGMII_SGMII:
		is_bgx = 1;
		lmac_type[0] = 5;
		lmac_type[1] = 0;
		lmac_type[2] = 0;
		lmac_type[3] = 0;
		sds_lane[0] = 0;
		sds_lane[1] = 1;
		sds_lane[2] = 2;
		sds_lane[3] = 3;
		break;
	case CVMX_QLM_MODE_RGMII_SGMII_1X1:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = 5;
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		}
		break;
	case CVMX_QLM_MODE_RGMII_SGMII_2X1:
		if (qlm == 6) {
			is_bgx = 1;
			lmac_type[0] = 5;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[0] = 0;
			sds_lane[2] = 0;
			sds_lane[3] = 1;
		}
		break;
	case CVMX_QLM_MODE_RGMII_10G_KR:
		enable_training = 1;
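		/* fallthrough */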
	case CVMX_QLM_MODE_RGMII_XFI:
		is_bgx = 1;
		lmac_type[0] = 5;
		lmac_type[1] = 3;
		lmac_type[2] = 3;
		lmac_type[3] = 3;
		sds_lane[0] = 0;
		sds_lane[1] = 1;
		sds_lane[2] = 2;
		sds_lane[3] = 3;
		break;
	case CVMX_QLM_MODE_RGMII_10G_KR_1X1:
		enable_training = 1;
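		/* fallthrough */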
	case CVMX_QLM_MODE_RGMII_XFI_1X1:
		if (qlm == 5) {
			is_bgx = 3;
			lmac_type[0] = 5;
			lmac_type[1] = 3;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		}
		break;
	case CVMX_QLM_MODE_RGMII_40G_KR4:
		enable_training = 1;
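		/* fallthrough */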
	case CVMX_QLM_MODE_RGMII_XLAUI:
		is_bgx = 5;
		lmac_type[0] = 5;
		lmac_type[1] = 4;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0x0;
		sds_lane[1] = 0xe4;
		break;
	case CVMX_QLM_MODE_RGMII_RXAUI:
		is_bgx = 3;
		lmac_type[0] = 5;
		lmac_type[1] = 2;
		lmac_type[2] = 2;
		lmac_type[3] = -1;
		sds_lane[0] = 0x0;
		sds_lane[1] = 0x4;
		sds_lane[2] = 0xe;
		break;
	case CVMX_QLM_MODE_RGMII_XAUI:
		is_bgx = 5;
		lmac_type[0] = 5;
		lmac_type[1] = 1;
		lmac_type[2] = -1;
		lmac_type[3] = -1;
		sds_lane[0] = 0;
		sds_lane[1] = 0xe4;
		break;
	default:
		break;
	}

	if (is_pcie == 0)
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
	debug("%s: mode: %d, lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
	      alt_pll ? "true" : "false");
	if (lane_mode == -1)
		return -1;

	if (alt_pll) {
		debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
		      __func__, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for qlm %d, lane mode: 0x%x\n",
			       __func__, ref_clk_sel, qlm, lane_mode);
			return -1;
		}
	}

	/* Power up PHY, but keep it in reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Set GSER for the interface mode */
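	/*
	 * is_bgx packs the BGX topology into three bits: bit 0 enables BGX,
	 * bit 1 marks a dual-lane (RXAUI) interface and bit 2 a quad-lane
	 * (XAUI/XLAUI) interface, matching the GSERX_CFG fields below.
	 */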
	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Lane mode */
	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/* Program lmac_type to figure out the type of BGX interface configured */
	if (is_bgx) {
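		/* QLM2/QLM3 map to BGX0/BGX1; DLM5 and DLM6 share BGX2 */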
		int bgx = (qlm < 4) ? qlm - 2 : 2;
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
		cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
		int index, total_lmacs = 0;

		for (index = 0; index < 4; index++) {
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			if (lmac_type[index] != -1) {
				cmr_config.s.lmac_type = lmac_type[index];
				cmr_config.s.lane_to_sds = sds_lane[index];
				total_lmacs++;
				/* RXAUI takes up 2 lmacs */
				if (lmac_type[index] == 2)
					total_lmacs += 1;
			}
			csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);

			/* Errata (TBD): RGMII doesn't turn on the clock if
			 * it's by itself. Force it on.
			 */
			if (lmac_type[index] == 5) {
				cvmx_bgxx_cmr_global_config_t global_config;

				global_config.u64 = csr_rd(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
				global_config.s.bgx_clk_enable = 1;
				csr_wr(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_config.u64);
			}

			/* Enable training for 10G_KR/40G_KR4 modes */
			if (enable_training == 1 &&
			    (lmac_type[index] == 3 || lmac_type[index] == 4)) {
				spu_pmd_control.u64 =
					csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
				spu_pmd_control.s.train_en = 1;
				csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				       spu_pmd_control.u64);
			}
		}

		/* Update the total number of lmacs */
		rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
		rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
		csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
		csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
	}

	/* Bring PHY out of reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/*
	 * Wait 1us until the management interface is ready to accept
	 * read/write commands.
	 */
	udelay(1);

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe.
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm),
					      cvmx_gserx_qlm_stat_t,
					      rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Configure the GSER PLL */
	if (!is_pcie)
		__qlm_setup_pll_cn78xx(0, qlm);

	/* Wait for reset to complete and the PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				  pll_lock, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
		return -1;
	}

	/* Errata GSER-26150: 10G PHY PLL Temperature Failure */
	/* This workaround must be completed after the final deassertion of
	 * GSERx_PHY_CTL[PHY_RESET].
	 * Apply the workaround to 10.3125Gbps and 8Gbps only.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_0) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		__qlm_errata_gser_26150(0, qlm, is_pcie);

	/* Errata GSER-26636: 10G-KR/40G-KR - Inverted Tx Coefficient Direction
	 * Change. Applied to all 10G standards (required for KR) but also
	 * applied to other standards in case software training is used.
	 */
	if (baud_mhz == 103125)
		__qlm_kr_inc_dec_gser26636(0, qlm);

	/* Errata GSER-25992: RX EQ Default Settings Update (CTLE Bias) */
	/* This workaround will only be applied to Pass 1.x */
	/* It will also only be applied if the SERDES data-rate is 10G */
	/* or if PCIe Gen3 (gen3=2 is PCIe Gen3) */
	if (baud_mhz == 103125 || (is_pcie && gen3 == 2))
		cvmx_qlm_gser_errata_25992(0, qlm);

	/* Errata GSER-27140: Updating the RX EQ settings due to temperature
	 * drift sensitivities
	 */
	/* This workaround will also only be applied if the SERDES data-rate is 10G */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(0, qlm);

	/* Reduce the voltage amplitude coming from the Marvell PHY and also
	 * change DFE threshold settings for the RXAUI interface
	 */
	if (is_bgx) {
		int l;

		for (l = 0; l < 4; l++) {
			cvmx_gserx_lanex_rx_cfg_4_t cfg4;
			cvmx_gserx_lanex_tx_cfg_0_t cfg0;

			if (lmac_type[l] == 2) {
				/* Change the Q/QB error sampler 0 threshold from 0xD to 0xF */
				cfg4.u64 = csr_rd(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
				cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
				csr_wr(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
				/* Reduce the voltage swing to roughly 460mV */
				cfg0.u64 = csr_rd(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
				cfg0.s.cfg_tx_swing = 0x12;
				csr_wr(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
			}
		}
	}

	return 0;
}
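
/*
 * Usage sketch (illustrative only; the speed and reference clock below are
 * assumptions, not values mandated by this driver): putting DLM5 and DLM6
 * into XFI mode via the octeon_configure_qlm() dispatcher further down
 * would look like:
 *
 *	octeon_configure_qlm(5, 103125, CVMX_QLM_MODE_XFI_1X2, 0, 0, 2, 0);
 *	octeon_configure_qlm(6, 103125, CVMX_QLM_MODE_XFI_1X2, 0, 0, 2, 0);
 */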

static int __rmac_pll_config(int baud_mhz, int qlm, int mode)
{
	cvmx_gserx_pll_px_mode_0_t pmode0;
	cvmx_gserx_pll_px_mode_1_t pmode1;
	cvmx_gserx_lane_px_mode_0_t lmode0;
	cvmx_gserx_lane_px_mode_1_t lmode1;
	cvmx_gserx_lane_mode_t lmode;

	switch (baud_mhz) {
	case 98304:
		pmode0.u64 = 0x1a0a;
		pmode1.u64 = 0x3228;
		lmode0.u64 = 0x600f;
		lmode1.u64 = 0xa80f;
		break;
	case 49152:
		if (mode == CVMX_QLM_MODE_SDL) {
			pmode0.u64 = 0x3605;
			pmode1.u64 = 0x0814;
			lmode0.u64 = 0x000f;
			lmode1.u64 = 0x6814;
		} else {
			pmode0.u64 = 0x1a0a;
			pmode1.u64 = 0x3228;
			lmode0.u64 = 0x650f;
			lmode1.u64 = 0xe80f;
		}
		break;
	case 24576:
		pmode0.u64 = 0x1a0a;
		pmode1.u64 = 0x3228;
		lmode0.u64 = 0x6a0f;
		lmode1.u64 = 0xe80f;
		break;
	case 12288:
		pmode0.u64 = 0x1a0a;
		pmode1.u64 = 0x3228;
		lmode0.u64 = 0x6f0f;
		lmode1.u64 = 0xe80f;
		break;
	case 6144:
		pmode0.u64 = 0x160a;
		pmode1.u64 = 0x1019;
		lmode0.u64 = 0x000f;
		lmode1.u64 = 0x2814;
		break;
	case 3072:
		pmode0.u64 = 0x160a;
		pmode1.u64 = 0x1019;
		lmode0.u64 = 0x050f;
		lmode1.u64 = 0x6814;
		break;
	default:
		printf("Invalid speed for CPRI/SDL configuration\n");
		return -1;
	}

	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	csr_wr(CVMX_GSERX_PLL_PX_MODE_0(lmode.s.lmode, qlm), pmode0.u64);
	csr_wr(CVMX_GSERX_PLL_PX_MODE_1(lmode.s.lmode, qlm), pmode1.u64);
	csr_wr(CVMX_GSERX_LANE_PX_MODE_0(lmode.s.lmode, qlm), lmode0.u64);
	csr_wr(CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm), lmode1.u64);
	return 0;
}
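
/*
 * Usage sketch (illustrative only; the QLM number and rate are assumptions):
 * bringing up a CPRI lane at 9.8304 Gbaud would select the 98304 entry of
 * the table above, e.g.:
 *
 *	if (__rmac_pll_config(98304, 4, CVMX_QLM_MODE_CPRI))
 *		printf("CPRI PLL configuration failed\n");
 */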

/**
 * Configure QLM/DLM speed and mode for cnf75xx.
 *
 * @param qlm The QLM to configure
 * @param baud_mhz The speed the QLM needs to be configured in MHz.
 * @param mode The QLM to be configured as SGMII/XAUI/PCIe.
 * @param rc Only used for PCIe, rc = 1 for root complex mode, 0 for EP mode.
 * @param gen3 Only used for PCIe
 *			gen3 = 2 GEN3 mode
 *			gen3 = 1 GEN2 mode
 *			gen3 = 0 GEN1 mode
 *
 * @param ref_clk_sel The reference-clock selection to use to configure QLM
 *			0 = REF_100MHZ
 *			1 = REF_125MHZ
 *			2 = REF_156MHZ
 *			3 = REF_122MHZ
 * @param ref_clk_input The reference-clock input to use to configure QLM
 *
 * Return: 0 on success or -1 on error.
 */
static int octeon_configure_qlm_cnf75xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
					int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	int is_pcie = 0;
	int is_bgx = 0;
	int is_srio = 0;
	int is_rmac = 0;
	int is_rmac_pipe = 0;
	int lane_mode = 0;
	short lmac_type[4] = { 0 };
	short sds_lane[4] = { 0 };
	bool alt_pll = false;
	int enable_training = 0;
	int additional_lmacs = 0;
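	/* SRIO port: QLM3 hosts SRIO port 1, other SRIO-capable QLMs port 0 */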
	int port = (qlm == 3) ? 1 : 0;
	cvmx_sriox_status_reg_t status_reg;

	debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d)\n",
	      __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
	if (qlm > 8) {
		printf("Invalid qlm%d passed\n", qlm);
		return -1;
	}

	/* Errata PEM-31375 PEM RSL accesses to PCLK registers can timeout
	 * during speed change. Change SLI_WINDOW_CTL[time] to 525us
	 */
	__set_sli_window_ctl_errata_31375(0);

	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));

	/* If PEM is in EP, no need to do anything */
	if (cfg.s.pcie && rc == 0) {
		debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	if (cfg.s.srio && rc == 0) {
		debug("%s: qlm %d is in SRIO endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Set the reference clock to use */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) { /* External ref clock */
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Reset the QLM after changing the reference clock */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE_2X1: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_rst_soft_prstx_t rst_prst;

		is_pcie = 1;

		if (qlm > 1) {
			printf("Invalid PCIe mode for QLM%d\n", qlm);
			return -1;
		}

		if (ref_clk_sel == 0) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0) /* Gen1 mode */
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1) /* Gen2 mode */
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		switch (qlm) {
		case 0: /* Either x4 or x2 based on PEM0 */
			rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(0, 0, !rc);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cnf75xx.hostmd = rc;
			pemx_cfg.cnf75xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE);
			pemx_cfg.cnf75xx.md = gen3;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
			/* x4 mode waits for QLM1 setup before turning on the PEM */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		case 1: /* Either PEM0 x4 or PEM1 x2 */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(0, 1, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
				pemx_cfg.cnf75xx.hostmd = rc;
				pemx_cfg.cnf75xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_SRIO_1X4:
	case CVMX_QLM_MODE_SRIO_2X2:
	case CVMX_QLM_MODE_SRIO_4X1: {
		int spd = 0xf;

		if (cvmx_fuse_read(1601)) {
			debug("SRIO is not supported on cnf73xx model\n");
			return -1;
		}

		switch (baud_mhz) {
		case 1250:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x3;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0xa;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0x4;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 2500:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x2;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0x9;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0x7;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 3125:
			switch (ref_clk_sel) {
			case 1: /* 125 MHz ref clock */
				spd = 0x8;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0xe;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		case 5000:
			switch (ref_clk_sel) {
			case 0: /* 100 MHz ref clock */
				spd = 0x0;
				break;
			case 1: /* 125 MHz ref clock */
				spd = 0x6;
				break;
			case 2: /* 156.25 MHz ref clock */
				spd = 0xb;
				break;
			default:
				spd = 0xf; /* Disabled */
				break;
			}
			break;
		default:
			spd = 0xf;
			break;
		}

		if (spd == 0xf) {
			printf("ERROR: Invalid SRIO speed (%d) configured for QLM%d\n", baud_mhz,
			       qlm);
			return -1;
		}

		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.spd = spd;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		is_srio = 1;
		break;
	}

	case CVMX_QLM_MODE_SGMII_2X1:
		if (qlm == 4) {
			is_bgx = 1;
			lmac_type[0] = 0;
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_10G_KR_1X2:
		enable_training = 1;
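		/* fallthrough */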
	case CVMX_QLM_MODE_XFI_1X2:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 3;
			lmac_type[3] = 3;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_CPRI: /* CPRI / JESD204B */
		is_rmac = 1;
		break;
	case CVMX_QLM_MODE_SDL: /* Serdes Lite (SDL) */
		is_rmac = 1;
		is_rmac_pipe = 1;
		lane_mode = 1;
		break;
	default:
		break;
	}

	if (is_rmac_pipe == 0 && is_pcie == 0) {
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz,
								  &alt_pll);
	}

	debug("%s: mode: %d, lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
	      alt_pll ? "true" : "false");
	if (lane_mode == -1)
		return -1;

	if (alt_pll) {
		debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
		      __func__, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for qlm %d\n",
			       __func__, ref_clk_sel, qlm);
			return -1;
		}
	}

	/* Power up PHY, but keep it in reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Set GSER for the interface mode */
	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	cfg.s.srio = is_srio;
	cfg.s.rmac = is_rmac;
	cfg.s.rmac_pipe = is_rmac_pipe;
	csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Lane mode */
	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/* Because of the errata where quad mode does not work, program
	 * lmac_type to determine the type of BGX interface configured
	 */
	if (is_bgx) {
		int bgx = 0;
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
		cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
		int index, total_lmacs = 0;

		for (index = 0; index < 4; index++) {
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			if (lmac_type[index] != -1) {
				cmr_config.s.lmac_type = lmac_type[index];
				cmr_config.s.lane_to_sds = sds_lane[index];
				total_lmacs++;
			}
			csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);

			/* Enable training for 10G_KR/40G_KR4 modes */
			if (enable_training == 1 &&
			    (lmac_type[index] == 3 || lmac_type[index] == 4)) {
				spu_pmd_control.u64 =
					csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
				spu_pmd_control.s.train_en = 1;
				csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				       spu_pmd_control.u64);
			}
		}

		/* Update the total number of lmacs */
		rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
		rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
		csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
		csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
	}

	/* Bring PHY out of reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/*
	 * Wait 1us until the management interface is ready to accept
	 * read/write commands.
	 */
	udelay(1);

	if (is_srio) {
		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.srio = 1;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		return 0;
	}

	/* Wait for reset to complete and the PLL to lock */
	/* PCIe mode doesn't become ready until the PEM block attempts to bring
	 * the interface up. Skip this check for PCIe.
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
					      rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Configure the GSER PLL */
	if (is_rmac)
		__rmac_pll_config(baud_mhz, qlm, mode);
	else if (!(is_pcie || is_srio))
		__qlm_setup_pll_cn78xx(0, qlm);

	/* Wait for reset to complete and the PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				  pll_lock, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
		return -1;
	}

	/* Errata GSER-27140: Updating the RX EQ settings due to temperature
	 * drift sensitivities
	 */
	/* This workaround will also only be applied if the SERDES data-rate is 10G */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(0, qlm);

	return 0;
}
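
/*
 * Usage sketch (illustrative only; the rate and reference clock are
 * assumptions): configuring DLM4 for two SGMII lanes goes through the
 * octeon_configure_qlm() dispatcher below:
 *
 *	octeon_configure_qlm(4, 1250, CVMX_QLM_MODE_SGMII_2X1, 0, 0, 2, 0);
 */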

/**
 * Configure QLM/DLM speed and mode.
 * @param qlm The QLM or DLM to configure
 * @param speed The speed the QLM needs to be configured in MHz.
 * @param mode The QLM to be configured as SGMII/XAUI/PCIe.
 * @param rc Only used for PCIe, rc = 1 for root complex mode, 0 for EP
 *	     mode.
 * @param pcie_mode Only used when qlm/dlm are in pcie mode.
 * @param ref_clk_sel Reference clock to use for 70XX where:
 *			0: 100MHz
 *			1: 125MHz
 *			2: 156.25MHz
 *			3: 122MHz (Used by RMAC)
 * @param ref_clk_input This selects which reference clock input to use. For
 *			cn70xx:
 *				0: DLMC_REF_CLK0
 *				1: DLMC_REF_CLK1
 *				2: DLM0_REF_CLK
 *			cn61xx: (not used)
 *			cn78xx/cn76xx/cn73xx:
 *				0: Internal clock (QLM[0-7]_REF_CLK)
 *				1: QLMC_REF_CLK0
 *				2: QLMC_REF_CLK1
 *
 * Return: 0 on success or -1 on error.
 */
int octeon_configure_qlm(int qlm, int speed, int mode, int rc, int pcie_mode, int ref_clk_sel,
			 int ref_clk_input)
{
	int node = 0;	/* TODO: currently only node 0 is supported */

	debug("%s(%d, %d, %d, %d, %d, %d, %d)\n", __func__, qlm, speed, mode, rc, pcie_mode,
	      ref_clk_sel, ref_clk_input);
	if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return octeon_configure_qlm_cn61xx(qlm, speed, mode, rc, pcie_mode);
	else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return octeon_configure_qlm_cn70xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
						   ref_clk_input);
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return octeon_configure_qlm_cn78xx(node, qlm, speed, mode, rc, pcie_mode,
						   ref_clk_sel, ref_clk_input);
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return octeon_configure_qlm_cn73xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
						   ref_clk_input);
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return octeon_configure_qlm_cnf75xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
						    ref_clk_input);
	else
		return -1;
}
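
/*
 * Example (illustrative only; the QLM number, speed and reference clock are
 * assumptions, not values mandated by this driver): a board file would
 * typically bring a QLM up for SGMII like this:
 *
 *	if (octeon_configure_qlm(2, 1250, CVMX_QLM_MODE_SGMII, 0, 0, 2, 0))
 *		printf("Failed to configure QLM2 for SGMII\n");
 */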

void octeon_init_qlm(int node)
{
	int qlm;
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_cfg_t cfg;
	int baud_mhz;
	int pem;

	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
		return;

	for (qlm = 0; qlm < 8; qlm++) {
		phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
		if (phy_ctl.s.phy_reset == 0) {
			cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
			if (cfg.s.pcie)
				__cvmx_qlm_pcie_errata_cn78xx(node, qlm);
			else
				__qlm_init_errata_20844(node, qlm);

			baud_mhz = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
			if (baud_mhz == 6250 || baud_mhz == 6316)
				octeon_qlm_tune_v3(node, qlm, baud_mhz, 0xa, 0xa0, -1, -1);
			else if (baud_mhz == 103125)
				octeon_qlm_tune_v3(node, qlm, baud_mhz, 0xd, 0xd0, -1, -1);
		}
	}

	/* Setup how each PEM drives the PERST lines */
	for (pem = 0; pem < 4; pem++) {
		cvmx_rst_ctlx_t rst_ctl;

		rst_ctl.u64 = csr_rd_node(node, CVMX_RST_CTLX(pem));
		__setup_pem_reset(node, pem, !rst_ctl.s.host_mode);
	}
}
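
/*
 * A typical call site (assumed here for illustration) runs octeon_init_qlm()
 * once per node during early board init, after the individual QLMs have been
 * configured:
 *
 *	octeon_init_qlm(0);
 */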