// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * Functions for RGMII/GMII/MII initialization, configuration,
 * and monitoring.
 */

#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>

#include <mach/cvmx-hwpko.h>

#include <mach/cvmx-asxx-defs.h>
#include <mach/cvmx-dbg-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-npi-defs.h>
#include <mach/cvmx-pko-defs.h>
/**
 * @INTERNAL
 * Probe RGMII ports and determine the number present
 *
 * @param xiface Interface to probe
 *
 * @return Number of RGMII/GMII/MII ports (0-4).
 */
int __cvmx_helper_rgmii_probe(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int num_ports = 0;
	union cvmx_gmxx_inf_mode mode;

	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));

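	/*
	 * Neither setting of mode.s.type (GMII vs. RGMII) is supported
	 * on the Octeon models this helper is built for, so both
	 * branches report the same error.
	 */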
	if (mode.s.type)
		debug("ERROR: Unsupported Octeon model in %s\n", __func__);
	else
		debug("ERROR: Unsupported Octeon model in %s\n", __func__);

	return num_ports;
}

/**
 * @INTERNAL
 * Configure all of the ASX, GMX, and PKO registers required
 * to get RGMII to function on the supplied interface.
 *
 * @param xiface PKO interface to configure (0 or 1)
 *
 * @return Zero on success
 */
int __cvmx_helper_rgmii_enable(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;
	int num_ports = cvmx_helper_ports_on_interface(interface);
	int port;
	union cvmx_gmxx_inf_mode mode;
	union cvmx_asxx_tx_prt_en asx_tx;
	union cvmx_asxx_rx_prt_en asx_rx;

	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));

	if (num_ports == -1)
		return -1;
	if (mode.s.en == 0)
		return -1;

	/* Configure the ASX registers needed to use the RGMII ports */
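	/*
	 * cvmx_build_mask(n) returns a value with the low n bits set,
	 * so prt_en enables ports 0..num_ports-1 in a single write.
	 */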
	asx_tx.u64 = 0;
	asx_tx.s.prt_en = cvmx_build_mask(num_ports);
	csr_wr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);

	asx_rx.u64 = 0;
	asx_rx.s.prt_en = cvmx_build_mask(num_ports);
	csr_wr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);

	/* Configure the GMX registers needed to use the RGMII ports */
	for (port = 0; port < num_ports; port++) {
		/*
		 * Configure more flexible RGMII preamble
		 * checking. Pass 1 doesn't support this feature.
		 */
		union cvmx_gmxx_rxx_frm_ctl frm_ctl;

		frm_ctl.u64 = csr_rd(CVMX_GMXX_RXX_FRM_CTL(port, interface));
		/* New field, so must be compile time */
		frm_ctl.s.pre_free = 1;
		csr_wr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);

		/*
		 * Each pause frame transmitted will ask for about 10M
		 * bit times before resume. If buffer space comes
		 * available before that time has expired, an XON
		 * pause frame (0 time) will be transmitted to restart
		 * the flow.
		 */
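		/*
		 * Assuming the standard 802.3x pause quantum of 512
		 * bit times, 20000 quanta is roughly 10.2M bit times;
		 * the refresh interval (19000) is kept just below the
		 * pause time so a new pause frame goes out before the
		 * previous one expires.
		 */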
		csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
		csr_wr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface),
		       19000);

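		/*
		 * Set the RGMII TX/RX clock delay compensation. The
		 * value 24 is the default used here; boards with
		 * unusual trace skew may need a different setting.
		 */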
		csr_wr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
		csr_wr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
	}

	__cvmx_helper_setup_gmx(interface, num_ports);

	/* enable the ports now */
	for (port = 0; port < num_ports; port++) {
		union cvmx_gmxx_prtx_cfg gmx_cfg;

		cvmx_helper_link_autoconf(
			cvmx_helper_get_ipd_port(interface, port));
		gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(port, interface));
		gmx_cfg.s.en = 1;
		csr_wr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
	}
	return 0;
}

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_asxx_prt_loop asxx_prt_loop;

	asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
	if (asxx_prt_loop.s.int_loop & (1 << index)) {
		/* Force 1Gbps full duplex on internal loopback */
		cvmx_helper_link_info_t result;

		result.u64 = 0;
		result.s.full_duplex = 1;
		result.s.link_up = 1;
		result.s.speed = 1000;
		return result;
	} else {
		return __cvmx_helper_board_link_get(ipd_port);
	}
}

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_gmii_link_get(int ipd_port)
{
	cvmx_helper_link_info_t result;
	int index = cvmx_helper_get_interface_index_num(ipd_port);

	if (index == 0) {
		result = __cvmx_helper_rgmii_link_get(ipd_port);
	} else {
		/*
		 * Start from a zeroed value so the remaining fields of
		 * the union are deterministic, then report the fixed
		 * 1Gbps full-duplex state.
		 */
		result.u64 = 0;
		result.s.full_duplex = 1;
		result.s.link_up = 1;
		result.s.speed = 1000;
	}

	return result;
}

/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
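 *
 * A minimal calling sketch (cvmx_helper_link_autoconf() effectively
 * does this for RGMII ports):
 *
 *   cvmx_helper_link_info_t info = cvmx_helper_link_get(ipd_port);
 *   __cvmx_helper_rgmii_link_set(ipd_port, info);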
 */
int __cvmx_helper_rgmii_link_set(int ipd_port,
				 cvmx_helper_link_info_t link_info)
{
	int result = 0;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_gmxx_prtx_cfg original_gmx_cfg;
	union cvmx_gmxx_prtx_cfg new_gmx_cfg;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
	union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
	union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
	int i;

	/* Read the current settings so we know the current enable state */
	original_gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
	new_gmx_cfg = original_gmx_cfg;

	/* Disable the lowest level RX */
	csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
	       csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1 << index));

	memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
	/* Disable all queues so that TX should become idle */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;

		csr_wr(CVMX_PKO_REG_READ_IDX, queue);
		pko_mem_queue_qos.u64 = csr_rd(CVMX_PKO_MEM_QUEUE_QOS);
		pko_mem_queue_qos.s.pid = ipd_port;
		pko_mem_queue_qos.s.qid = queue;
		pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
		pko_mem_queue_qos.s.qos_mask = 0;
		csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
	}

	/* Disable backpressure */
	gmx_tx_ovr_bp.u64 = csr_rd(CVMX_GMXX_TX_OVR_BP(interface));
	gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
	gmx_tx_ovr_bp.s.bp &= ~(1 << index);
	gmx_tx_ovr_bp.s.en |= 1 << index;
	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
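	/* Read the register back to make sure the write has posted */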
	csr_rd(CVMX_GMXX_TX_OVR_BP(interface));

	/*
	 * Poll the GMX state machine waiting for it to become
	 * idle. Preferably we should only change speed when it is
	 * idle. If it doesn't become idle we will still do the speed
	 * change, but there is a slight chance that GMX will
	 * lockup.
	 */
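	/*
	 * The value written to NPI_DBG_SELECT routes the GMX
	 * state-machine status for this interface/port onto the debug
	 * data bus (CVMX_DBG_DATA); the low bits polled below read
	 * zero once the state machines are idle. The exact select
	 * encoding is chip-specific.
	 */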
	csr_wr(CVMX_NPI_DBG_SELECT, interface * 0x800 + index * 0x100 + 0x880);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 7, ==, 0,
			      10000);
	CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data & 0xf, ==, 0,
			      10000);

	/* Disable the port before we make any changes */
	new_gmx_cfg.s.en = 0;
	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
	csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Set full/half duplex */
	if (!link_info.s.link_up)
		/* Force full duplex on down links */
		new_gmx_cfg.s.duplex = 1;
	else
		new_gmx_cfg.s.duplex = link_info.s.full_duplex;

	/* Set the link speed. Anything unknown is set to 1Gbps */
	if (link_info.s.speed == 10) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else if (link_info.s.speed == 100) {
		new_gmx_cfg.s.slottime = 0;
		new_gmx_cfg.s.speed = 0;
	} else {
		new_gmx_cfg.s.slottime = 1;
		new_gmx_cfg.s.speed = 1;
	}

	/* Adjust the clocks */
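	/*
	 * The TX clock divider tracks the link speed: 50, 5, and 1
	 * produce the standard 2.5/25/125 MHz RGMII TX clocks for
	 * 10/100/1000 Mbps. Slot time and burst size only matter for
	 * half-duplex operation: 0x40 (64 bytes) for 10/100, and
	 * 0x200/0x2000 for gigabit.
	 */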
	if (link_info.s.speed == 10) {
		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 50);
		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else if (link_info.s.speed == 100) {
		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 5);
		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0);
	} else {
		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}

	/* Do a read to make sure all setup stuff is complete */
	csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Save the new GMX setting without enabling the port */
	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	/* Enable the lowest level RX */
	if (link_info.s.link_up)
		csr_wr(CVMX_ASXX_RX_PRT_EN(interface),
		       csr_rd(CVMX_ASXX_RX_PRT_EN(interface)) | (1 << index));

	/* Re-enable the TX path */
	for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
		int queue = cvmx_pko_get_base_queue(ipd_port) + i;

		csr_wr(CVMX_PKO_REG_READ_IDX, queue);
		csr_wr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
	}

	/* Restore backpressure */
	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);

	/* Restore the GMX enable state. Port config is complete */
	new_gmx_cfg.s.en = original_gmx_cfg.s.en;
	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);

	return result;
}

/**
 * @INTERNAL
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
 *
 * @param ipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *	Non-zero if you want internal loopback
 * @param enable_external
 *	Non-zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
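 *
 * Example (a sketch): enable internal loopback only, so packets the
 * port transmits are looped straight back into Octeon:
 *
 *   __cvmx_helper_rgmii_configure_loopback(ipd_port, 1, 0);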
 */
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
					   int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	int original_enable;
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_asxx_prt_loop asxx_prt_loop;

	/* Read the current enable state and save it */
	gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(index, interface));
	original_enable = gmx_cfg.s.en;
	/* Force port to be disabled */
	gmx_cfg.s.en = 0;
	if (enable_internal) {
		/* Force speed if we're doing internal loopback */
		gmx_cfg.s.duplex = 1;
		gmx_cfg.s.slottime = 1;
		gmx_cfg.s.speed = 1;
		csr_wr(CVMX_GMXX_TXX_CLK(index, interface), 1);
		csr_wr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
		csr_wr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
	}
	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	/* Set the loopback bits */
	asxx_prt_loop.u64 = csr_rd(CVMX_ASXX_PRT_LOOP(interface));
	if (enable_internal)
		asxx_prt_loop.s.int_loop |= 1 << index;
	else
		asxx_prt_loop.s.int_loop &= ~(1 << index);
	if (enable_external)
		asxx_prt_loop.s.ext_loop |= 1 << index;
	else
		asxx_prt_loop.s.ext_loop &= ~(1 << index);
	csr_wr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);

	/* Force enables in internal loopback */
	if (enable_internal) {
		u64 tmp;

		tmp = csr_rd(CVMX_ASXX_TX_PRT_EN(interface));
		csr_wr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
		tmp = csr_rd(CVMX_ASXX_RX_PRT_EN(interface));
		csr_wr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
		original_enable = 1;
	}

	/* Restore the enable state */
	gmx_cfg.s.en = original_enable;
	csr_wr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
	return 0;
}