// SPDX-License-Identifier: GPL-2.0
/*
 * phy-zynqmp.c - PHY driver for Xilinx ZynqMP GT.
 *
 * Copyright (C) 2018-2021 Xilinx Inc.
 *
 * Author: Anurag Kumar Vulisha <anuragku@xilinx.com>
 * Author: Subbaraya Sundeep <sundeep.lkml@gmail.com>
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <common.h>
#include <clk-uclass.h>
#include <dm.h>
#include <generic-phy.h>
#include <log.h>
#include <power-domain.h>
#include <regmap.h>
#include <syscon.h>
#include <asm/io.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch/hardware.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/lists.h>
#include <dt-bindings/phy/phy.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>

/*
 * Lane Registers
 */

/* TX De-emphasis parameters */
#define L0_TX_ANA_TM_18			0x0048
#define L0_TX_ANA_TM_118		0x01d8
#define L0_TX_ANA_TM_118_FORCE_17_0	BIT(0)

/* DN Resistor calibration code parameters */
#define L0_TXPMA_ST_3			0x0b0c
#define L0_DN_CALIB_CODE		0x3f

/* PMA control parameters */
#define L0_TXPMD_TM_45			0x0cb4
#define L0_TXPMD_TM_48			0x0cc0
#define L0_TXPMD_TM_45_OVER_DP_MAIN	BIT(0)
#define L0_TXPMD_TM_45_ENABLE_DP_MAIN	BIT(1)
#define L0_TXPMD_TM_45_OVER_DP_POST1	BIT(2)
#define L0_TXPMD_TM_45_ENABLE_DP_POST1	BIT(3)
#define L0_TXPMD_TM_45_OVER_DP_POST2	BIT(4)
#define L0_TXPMD_TM_45_ENABLE_DP_POST2	BIT(5)

/* PCS control parameters */
#define L0_TM_DIG_6			0x106c
#define L0_TM_DIS_DESCRAMBLE_DECODER	0x0f
#define L0_TX_DIG_61			0x00f4
#define L0_TM_DISABLE_SCRAMBLE_ENCODER	0x0f

/* PLL Test Mode register parameters */
#define L0_TM_PLL_DIG_37		0x2094
#define L0_TM_COARSE_CODE_LIMIT		0x10

/* PLL SSC step size offsets */
#define L0_PLL_SS_STEPS_0_LSB		0x2368
#define L0_PLL_SS_STEPS_1_MSB		0x236c
#define L0_PLL_SS_STEP_SIZE_0_LSB	0x2370
#define L0_PLL_SS_STEP_SIZE_1		0x2374
#define L0_PLL_SS_STEP_SIZE_2		0x2378
#define L0_PLL_SS_STEP_SIZE_3_MSB	0x237c
#define L0_PLL_STATUS_READ_1		0x23e4

/* SSC step size parameters */
#define STEP_SIZE_0_MASK		0xff
#define STEP_SIZE_1_MASK		0xff
#define STEP_SIZE_2_MASK		0xff
#define STEP_SIZE_3_MASK		0x3
#define STEP_SIZE_SHIFT			8
#define FORCE_STEP_SIZE			0x10
#define FORCE_STEPS			0x20
#define STEPS_0_MASK			0xff
#define STEPS_1_MASK			0x07

/* Reference clock selection parameters */
#define L0_Ln_REF_CLK_SEL(n)		(0x2860 + (n) * 4)
#define L0_REF_CLK_SEL_MASK		0x8f

/* Calibration digital logic parameters */
#define L3_TM_CALIB_DIG19		0xec4c
#define L3_CALIB_DONE_STATUS		0xef14
#define L3_TM_CALIB_DIG18		0xec48
#define L3_TM_CALIB_DIG19_NSW		0x07
#define L3_TM_CALIB_DIG18_NSW		0xe0
#define L3_TM_OVERRIDE_NSW_CODE		0x20
#define L3_CALIB_DONE			0x02
#define L3_NSW_SHIFT			5
#define L3_NSW_PIPE_SHIFT		4
#define L3_NSW_CALIB_SHIFT		3

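/* Each lane's register block is spaced PHY_REG_OFFSET apart in the SERDES address space */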
#define PHY_REG_OFFSET			0x4000

/*
 * Global Registers
 */

/* Refclk selection parameters */
#define PLL_REF_SEL(n)			(0x10000 + (n) * 4)
#define PLL_FREQ_MASK			0x1f
#define PLL_STATUS_LOCKED		0x10

/* Inter Connect Matrix parameters */
#define ICM_CFG0			0x10010
#define ICM_CFG1			0x10014
#define ICM_CFG0_L0_MASK		0x07
#define ICM_CFG0_L1_MASK		0x70
#define ICM_CFG1_L2_MASK		0x07
#define ICM_CFG2_L3_MASK		0x70
#define ICM_CFG_SHIFT			4

/* Inter Connect Matrix allowed protocols */
#define ICM_PROTOCOL_PD			0x0
#define ICM_PROTOCOL_PCIE		0x1
#define ICM_PROTOCOL_SATA		0x2
#define ICM_PROTOCOL_USB		0x3
#define ICM_PROTOCOL_DP			0x4
#define ICM_PROTOCOL_SGMII		0x5

/* Test Mode common reset control parameters */
#define TM_CMN_RST			0x10018
#define TM_CMN_RST_EN			0x1
#define TM_CMN_RST_SET			0x2
#define TM_CMN_RST_MASK			0x3

/* Bus width parameters */
#define TX_PROT_BUS_WIDTH		0x10040
#define RX_PROT_BUS_WIDTH		0x10044
#define PROT_BUS_WIDTH_10		0x0
#define PROT_BUS_WIDTH_20		0x1
#define PROT_BUS_WIDTH_40		0x2
#define PROT_BUS_WIDTH_MASK		0x3
#define PROT_BUS_WIDTH_SHIFT		2

/* Number of GT lanes */
#define NUM_LANES			4

/* SIOU SATA control register */
#define SATA_CONTROL_OFFSET		0x0100

/* Number of controller types that can be mapped to each lane */
#define CONTROLLERS_PER_LANE		5

/* Protocol Type parameters */
enum {
	XPSGTR_TYPE_USB0 = 0,	/* USB controller 0 */
	XPSGTR_TYPE_USB1 = 1,	/* USB controller 1 */
	XPSGTR_TYPE_SATA_0 = 2,	/* SATA controller lane 0 */
	XPSGTR_TYPE_SATA_1 = 3,	/* SATA controller lane 1 */
	XPSGTR_TYPE_PCIE_0 = 4,	/* PCIe controller lane 0 */
	XPSGTR_TYPE_PCIE_1 = 5,	/* PCIe controller lane 1 */
	XPSGTR_TYPE_PCIE_2 = 6,	/* PCIe controller lane 2 */
	XPSGTR_TYPE_PCIE_3 = 7,	/* PCIe controller lane 3 */
	XPSGTR_TYPE_DP_0 = 8,	/* Display Port controller lane 0 */
	XPSGTR_TYPE_DP_1 = 9,	/* Display Port controller lane 1 */
	XPSGTR_TYPE_SGMII0 = 10, /* Ethernet SGMII controller 0 */
	XPSGTR_TYPE_SGMII1 = 11, /* Ethernet SGMII controller 1 */
	XPSGTR_TYPE_SGMII2 = 12, /* Ethernet SGMII controller 2 */
	XPSGTR_TYPE_SGMII3 = 13, /* Ethernet SGMII controller 3 */
};

/* Timeout values */
#define TIMEOUT_US			10000

#define IOU_SLCR_GEM_CLK_CTRL		0x308
#define GEM_CTRL_GEM_SGMII_MODE		BIT(2)
#define GEM_CTRL_GEM_REF_SRC_SEL	BIT(1)

#define IOU_SLCR_GEM_CTRL		0x360
#define GEM_CTRL_GEM_SGMII_SD		BIT(0)

/**
 * struct xpsgtr_ssc - structure to hold SSC settings for a lane
 * @refclk_rate: PLL reference clock frequency
 * @pll_ref_clk: value to be written to register for corresponding ref clk rate
 * @steps: number of steps of SSC (Spread Spectrum Clock)
 * @step_size: step size of each step
 */
struct xpsgtr_ssc {
	u32 refclk_rate;
	u8 pll_ref_clk;
	u32 steps;
	u32 step_size;
};

/**
 * struct xpsgtr_phy - representation of a lane
 * @dev: pointer to the xpsgtr_dev instance
 * @refclk: reference clock index
 * @type: controller which uses this lane
 * @lane: lane number
 * @protocol: protocol in which the lane operates
 */
struct xpsgtr_phy {
	struct xpsgtr_dev *dev;
	unsigned int refclk;
	u8 type;
	u8 lane;
	u8 protocol;
};

/**
 * struct xpsgtr_dev - representation of a ZynqMP GT device
 * @dev: pointer to device
 * @serdes: serdes base address
 * @siou: siou base address
 * @phys: PHY lanes
 * @refclk_sscs: spread spectrum settings for the reference clocks
 * @clk: reference clocks
 */
struct xpsgtr_dev {
	struct udevice *dev;
	u8 *serdes;
	u8 *siou;
	struct xpsgtr_phy phys[NUM_LANES];
	const struct xpsgtr_ssc *refclk_sscs[NUM_LANES];
	struct clk clk[NUM_LANES];
};

/* Configuration Data */
/* lookup table to hold all settings needed for a ref clock frequency */
static const struct xpsgtr_ssc ssc_lookup[] = {
	{ 19200000, 0x05, 608, 264020 },
	{ 20000000, 0x06, 634, 243454 },
	{ 24000000, 0x07, 760, 168973 },
	{ 26000000, 0x08, 824, 143860 },
	{ 27000000, 0x09, 856, 86551 },
	{ 38400000, 0x0a, 1218, 65896 },
	{ 40000000, 0x0b, 634, 243454 },
	{ 52000000, 0x0c, 824, 143860 },
	{ 100000000, 0x0d, 1058, 87533 },
	{ 108000000, 0x0e, 856, 86551 },
	{ 125000000, 0x0f, 992, 119497 },
	{ 135000000, 0x10, 1070, 55393 },
	{ 150000000, 0x11, 792, 187091 }
};

/* I/O Accessors */
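/* xpsgtr_read/write access global SERDES registers; the *_phy variants address a single lane's register block. */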
static u32 xpsgtr_read(struct xpsgtr_dev *gtr_dev, u32 reg)
{
	return readl(gtr_dev->serdes + reg);
}

static void xpsgtr_write(struct xpsgtr_dev *gtr_dev, u32 reg, u32 value)
{
	writel(value, gtr_dev->serdes + reg);
}

static void xpsgtr_clr_set(struct xpsgtr_dev *gtr_dev, u32 reg,
			   u32 clr, u32 set)
{
	u32 value = xpsgtr_read(gtr_dev, reg);

	value &= ~clr;
	value |= set;
	xpsgtr_write(gtr_dev, reg, value);
}

static u32 xpsgtr_read_phy(struct xpsgtr_phy *gtr_phy, u32 reg)
{
	void __iomem *addr = gtr_phy->dev->serdes
			   + gtr_phy->lane * PHY_REG_OFFSET + reg;

	return readl(addr);
}

static void xpsgtr_write_phy(struct xpsgtr_phy *gtr_phy,
			     u32 reg, u32 value)
{
	void __iomem *addr = gtr_phy->dev->serdes
			   + gtr_phy->lane * PHY_REG_OFFSET + reg;

	writel(value, addr);
}

static void xpsgtr_clr_set_phy(struct xpsgtr_phy *gtr_phy,
			       u32 reg, u32 clr, u32 set)
{
	void __iomem *addr = gtr_phy->dev->serdes
			   + gtr_phy->lane * PHY_REG_OFFSET + reg;

	writel((readl(addr) & ~clr) | set, addr);
}

/* Configure PLL and spread-spectrum clock. */
static void xpsgtr_configure_pll(struct xpsgtr_phy *gtr_phy)
{
	const struct xpsgtr_ssc *ssc;
	u32 step_size;

	ssc = gtr_phy->dev->refclk_sscs[gtr_phy->refclk];
	step_size = ssc->step_size;

	xpsgtr_clr_set(gtr_phy->dev, PLL_REF_SEL(gtr_phy->lane),
		       PLL_FREQ_MASK, ssc->pll_ref_clk);

	/* Enable lane clock sharing, if required */
	if (gtr_phy->refclk != gtr_phy->lane) {
		/* Lane3 Ref Clock Selection Register */
		xpsgtr_clr_set(gtr_phy->dev, L0_Ln_REF_CLK_SEL(gtr_phy->lane),
			       L0_REF_CLK_SEL_MASK, 1 << gtr_phy->refclk);
	}

	/* SSC step size [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_0_LSB,
			   STEP_SIZE_0_MASK, step_size & STEP_SIZE_0_MASK);

	/* SSC step size [15:8] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_1,
			   STEP_SIZE_1_MASK, step_size & STEP_SIZE_1_MASK);

	/* SSC step size [23:16] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_2,
			   STEP_SIZE_2_MASK, step_size & STEP_SIZE_2_MASK);

	/* SSC steps [7:0] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_0_LSB,
			   STEPS_0_MASK, ssc->steps & STEPS_0_MASK);

	/* SSC steps [10:8] */
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEPS_1_MSB,
			   STEPS_1_MASK,
			   (ssc->steps >> STEP_SIZE_SHIFT) & STEPS_1_MASK);

	/* SSC step size [25:24] */
	step_size >>= STEP_SIZE_SHIFT;
	xpsgtr_clr_set_phy(gtr_phy, L0_PLL_SS_STEP_SIZE_3_MSB,
			   STEP_SIZE_3_MASK, (step_size & STEP_SIZE_3_MASK) |
			   FORCE_STEP_SIZE | FORCE_STEPS);
}

/* Configure the lane protocol. */
static void xpsgtr_lane_set_protocol(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u8 protocol = gtr_phy->protocol;

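	/* Lanes 0 and 1 are selected in ICM_CFG0, lanes 2 and 3 in ICM_CFG1. */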
	switch (gtr_phy->lane) {
	case 0:
		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L0_MASK, protocol);
		break;
	case 1:
		xpsgtr_clr_set(gtr_dev, ICM_CFG0, ICM_CFG0_L1_MASK,
			       protocol << ICM_CFG_SHIFT);
		break;
	case 2:
		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L0_MASK, protocol);
		break;
	case 3:
		xpsgtr_clr_set(gtr_dev, ICM_CFG1, ICM_CFG0_L1_MASK,
			       protocol << ICM_CFG_SHIFT);
		break;
	default:
		/* We already checked 0 <= lane <= 3 */
		break;
	}
}

/* Bypass (de)scrambler and 8b/10b decoder and encoder. */
static void xpsgtr_bypass_scrambler_8b10b(struct xpsgtr_phy *gtr_phy)
{
	xpsgtr_write_phy(gtr_phy, L0_TM_DIG_6, L0_TM_DIS_DESCRAMBLE_DECODER);
	xpsgtr_write_phy(gtr_phy, L0_TX_DIG_61, L0_TM_DISABLE_SCRAMBLE_ENCODER);
}

/* DP-specific initialization. */
static void xpsgtr_phy_init_dp(struct xpsgtr_phy *gtr_phy)
{
	xpsgtr_write_phy(gtr_phy, L0_TXPMD_TM_45,
			 L0_TXPMD_TM_45_OVER_DP_MAIN |
			 L0_TXPMD_TM_45_ENABLE_DP_MAIN |
			 L0_TXPMD_TM_45_OVER_DP_POST1 |
			 L0_TXPMD_TM_45_OVER_DP_POST2 |
			 L0_TXPMD_TM_45_ENABLE_DP_POST2);
	xpsgtr_write_phy(gtr_phy, L0_TX_ANA_TM_118,
			 L0_TX_ANA_TM_118_FORCE_17_0);
}

/* SATA-specific initialization. */
static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;

	xpsgtr_bypass_scrambler_8b10b(gtr_phy);

	writel(gtr_phy->lane, gtr_dev->siou + SATA_CONTROL_OFFSET);
}

/* SGMII-specific initialization. */
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
	struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
	u32 shift = gtr_phy->lane * PROT_BUS_WIDTH_SHIFT;

	/* Set SGMII protocol TX and RX bus width to 10 bits. */
	xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, PROT_BUS_WIDTH_MASK << shift,
		       PROT_BUS_WIDTH_10 << shift);

	xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, PROT_BUS_WIDTH_MASK << shift,
		       PROT_BUS_WIDTH_10 << shift);

	xpsgtr_bypass_scrambler_8b10b(gtr_phy);

	/*
	 * The code below is a temporary solution until there is a way to do
	 * this via the firmware interface, in sync with Linux. Until then,
	 * this is the most sensible thing to do here.
	 */
	/* GEM I/O Clock Control */
	clrsetbits_le32(ZYNQMP_IOU_SLCR_BASEADDR + IOU_SLCR_GEM_CLK_CTRL,
			0xf << shift,
			(GEM_CTRL_GEM_SGMII_MODE | GEM_CTRL_GEM_REF_SRC_SEL) <<
			shift);

	/* Setup signal detect */
	clrsetbits_le32(ZYNQMP_IOU_SLCR_BASEADDR + IOU_SLCR_GEM_CTRL,
			PROT_BUS_WIDTH_MASK << shift,
			GEM_CTRL_GEM_SGMII_SD << shift);
}

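/* Initialize a lane: enable coarse code saturation limiting, configure the PLL and lane protocol, then run protocol-specific setup. */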
static int xpsgtr_init(struct phy *x)
{
	struct xpsgtr_dev *gtr_dev = dev_get_priv(x->dev);
	struct xpsgtr_phy *gtr_phy;
	u32 phy_lane = x->id;

	gtr_phy = &gtr_dev->phys[phy_lane];

	/* Enable coarse code saturation limiting logic. */
	xpsgtr_write_phy(gtr_phy, L0_TM_PLL_DIG_37, L0_TM_COARSE_CODE_LIMIT);

	/*
	 * Configure the PLL, the lane protocol, and perform protocol-specific
	 * initialization.
	 */
	xpsgtr_configure_pll(gtr_phy);
	xpsgtr_lane_set_protocol(gtr_phy);

	switch (gtr_phy->protocol) {
	case ICM_PROTOCOL_SGMII:
		xpsgtr_phy_init_sgmii(gtr_phy);
		break;
	case ICM_PROTOCOL_SATA:
		xpsgtr_phy_init_sata(gtr_phy);
		break;
	case ICM_PROTOCOL_DP:
		xpsgtr_phy_init_dp(gtr_phy);
		break;
	}

	dev_dbg(gtr_dev->dev, "lane %u (type %u, protocol %u): init done\n",
		gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);

	return 0;
}

/* Wait for the PLL to lock (with a timeout). */
static int xpsgtr_wait_pll_lock(struct phy *phy)
{
	struct xpsgtr_dev *gtr_dev = dev_get_priv(phy->dev);
	struct xpsgtr_phy *gtr_phy;
	u32 phy_lane = phy->id;
	int ret = 0;
	unsigned int timeout = TIMEOUT_US;

	gtr_phy = &gtr_dev->phys[phy_lane];

	dev_dbg(gtr_dev->dev, "Waiting for PLL lock\n");

	while (1) {
		u32 reg = xpsgtr_read_phy(gtr_phy, L0_PLL_STATUS_READ_1);

		if ((reg & PLL_STATUS_LOCKED) == PLL_STATUS_LOCKED) {
			ret = 0;
			break;
		}

		if (--timeout == 0) {
			ret = -ETIMEDOUT;
			break;
		}

		udelay(1);
	}

	if (ret == -ETIMEDOUT)
		dev_err(gtr_dev->dev,
			"lane %u (type %u, protocol %u): PLL lock timeout\n",
			gtr_phy->lane, gtr_phy->type, gtr_phy->protocol);

	return ret;
}

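/* Power on a lane by waiting for its PLL to lock. */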
static int xpsgtr_power_on(struct phy *phy)
{
	struct xpsgtr_dev *gtr_dev = dev_get_priv(phy->dev);
	struct xpsgtr_phy *gtr_phy;
	u32 phy_lane = phy->id;
	int ret = 0;

	gtr_phy = &gtr_dev->phys[phy_lane];

	/*
	 * Wait for the PLL to lock. For DP, only wait on DP0 to avoid
	 * cumulating waits for both lanes. The user is expected to initialize
	 * lane 0 last.
	 */
	if (gtr_phy->protocol != ICM_PROTOCOL_DP ||
	    gtr_phy->type == XPSGTR_TYPE_DP_0)
		ret = xpsgtr_wait_pll_lock(phy);

	return ret;
}

/*
 * OF Xlate Support
 */

/* Set the lane type and protocol based on the PHY type and instance number. */
static int xpsgtr_set_lane_type(struct xpsgtr_phy *gtr_phy, u8 phy_type,
				unsigned int phy_instance)
{
	unsigned int num_phy_types;
	const int *phy_types;

	switch (phy_type) {
	case PHY_TYPE_SATA: {
		static const int types[] = {
			XPSGTR_TYPE_SATA_0,
			XPSGTR_TYPE_SATA_1,
		};

		phy_types = types;
		num_phy_types = ARRAY_SIZE(types);
		gtr_phy->protocol = ICM_PROTOCOL_SATA;
		break;
	}
	case PHY_TYPE_USB3: {
		static const int types[] = {
			XPSGTR_TYPE_USB0,
			XPSGTR_TYPE_USB1,
		};

		phy_types = types;
		num_phy_types = ARRAY_SIZE(types);
		gtr_phy->protocol = ICM_PROTOCOL_USB;
		break;
	}
	case PHY_TYPE_DP: {
		static const int types[] = {
			XPSGTR_TYPE_DP_0,
			XPSGTR_TYPE_DP_1,
		};

		phy_types = types;
		num_phy_types = ARRAY_SIZE(types);
		gtr_phy->protocol = ICM_PROTOCOL_DP;
		break;
	}
	case PHY_TYPE_PCIE: {
		static const int types[] = {
			XPSGTR_TYPE_PCIE_0,
			XPSGTR_TYPE_PCIE_1,
			XPSGTR_TYPE_PCIE_2,
			XPSGTR_TYPE_PCIE_3,
		};

		phy_types = types;
		num_phy_types = ARRAY_SIZE(types);
		gtr_phy->protocol = ICM_PROTOCOL_PCIE;
		break;
	}
	case PHY_TYPE_SGMII: {
		static const int types[] = {
			XPSGTR_TYPE_SGMII0,
			XPSGTR_TYPE_SGMII1,
			XPSGTR_TYPE_SGMII2,
			XPSGTR_TYPE_SGMII3,
		};

		phy_types = types;
		num_phy_types = ARRAY_SIZE(types);
		gtr_phy->protocol = ICM_PROTOCOL_SGMII;
		break;
	}
	default:
		return -EINVAL;
	}

	if (phy_instance >= num_phy_types)
		return -EINVAL;

	gtr_phy->type = phy_types[phy_instance];
	return 0;
}

/*
 * Valid combinations of controllers and lanes (Interconnect Matrix).
 */
static const unsigned int icm_matrix[NUM_LANES][CONTROLLERS_PER_LANE] = {
	{ XPSGTR_TYPE_PCIE_0, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
	  XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII0 },
	{ XPSGTR_TYPE_PCIE_1, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB0,
	  XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII1 },
	{ XPSGTR_TYPE_PCIE_2, XPSGTR_TYPE_SATA_0, XPSGTR_TYPE_USB0,
	  XPSGTR_TYPE_DP_1, XPSGTR_TYPE_SGMII2 },
	{ XPSGTR_TYPE_PCIE_3, XPSGTR_TYPE_SATA_1, XPSGTR_TYPE_USB1,
	  XPSGTR_TYPE_DP_0, XPSGTR_TYPE_SGMII3 }
};

/* Translate OF phandle and args to PHY instance. */
static int xpsgtr_of_xlate(struct phy *x,
			   struct ofnode_phandle_args *args)
{
	struct xpsgtr_dev *gtr_dev = dev_get_priv(x->dev);
	struct xpsgtr_phy *gtr_phy;
	struct udevice *dev = x->dev;
	unsigned int phy_instance;
	unsigned int phy_lane;
	unsigned int phy_type;
	unsigned int refclk;
	unsigned int i;
	int ret;

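	/* #phy-cells is 4: lane number, PHY type, PHY instance, refclk index. */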
	if (args->args_count != 4) {
		dev_err(dev, "Invalid number of cells in 'phy' property\n");
		return -EINVAL;
	}

	/*
	 * Get the PHY parameters from the OF arguments and derive the lane
	 * type.
	 */
	phy_lane = args->args[0];
	if (phy_lane >= NUM_LANES) {
		dev_err(dev, "Invalid lane number %u\n", phy_lane);
		return -EINVAL;
	}

	gtr_phy = &gtr_dev->phys[phy_lane];
	phy_type = args->args[1];
	phy_instance = args->args[2];

	ret = xpsgtr_set_lane_type(gtr_phy, phy_type, phy_instance);
	if (ret) {
		dev_err(dev, "Invalid PHY type and/or instance\n");
		return ret;
	}

	refclk = args->args[3];
	if (refclk >= ARRAY_SIZE(gtr_dev->refclk_sscs) ||
	    !gtr_dev->refclk_sscs[refclk]) {
		dev_err(dev, "Invalid reference clock number %u\n", refclk);
		return -EINVAL;
	}

	gtr_phy->refclk = refclk;

	/* This is a difference compared to the Linux driver */
	gtr_phy->dev = gtr_dev;
	gtr_phy->lane = phy_lane;

	/*
	 * Ensure that the Interconnect Matrix is obeyed, i.e. a given lane
	 * type is allowed to operate on the lane.
	 */
	for (i = 0; i < CONTROLLERS_PER_LANE; i++) {
		if (icm_matrix[phy_lane][i] == gtr_phy->type) {
			x->id = phy_lane;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Probe & Platform Driver
 */
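/* Get, enable and validate the lane reference clocks against the SSC lookup table. */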
static int xpsgtr_get_ref_clocks(struct udevice *dev)
{
	unsigned int refclk;
	struct xpsgtr_dev *gtr_dev = dev_get_priv(dev);
	int ret;

	for (refclk = 0; refclk < NUM_LANES; ++refclk) {
		int i;
		u32 rate;
		char name[8];
		struct clk *clk = &gtr_dev->clk[refclk];

		snprintf(name, sizeof(name), "ref%u", refclk);
		dev_dbg(dev, "Checking name: %s\n", name);
		ret = clk_get_by_name(dev, name, clk);
		if (ret == -ENODATA) {
			dev_dbg(dev, "%s clock not specified (err %d)\n",
				name, ret);
			continue;
		} else if (ret) {
			dev_dbg(dev, "couldn't get clock %s (err %d)\n",
				name, ret);
			return ret;
		}

		rate = clk_get_rate(clk);

		dev_dbg(dev, "clk rate %d\n", rate);

		ret = clk_enable(clk);
		if (ret) {
			dev_err(dev, "failed to enable refclk %d clock\n",
				refclk);
			return ret;
		}

		for (i = 0; i < ARRAY_SIZE(ssc_lookup); i++) {
			if (rate == ssc_lookup[i].refclk_rate) {
				gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
				dev_dbg(dev, "Found rate %d\n", i);
				break;
			}
		}

		if (i == ARRAY_SIZE(ssc_lookup)) {
			dev_err(dev,
				"Invalid rate %u for reference clock %u\n",
				rate, refclk);
			return -EINVAL;
		}
	}

	return 0;
}

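/* Map the SERDES and SIOU register regions and set up the reference clocks. */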
static int xpsgtr_probe(struct udevice *dev)
{
	struct xpsgtr_dev *gtr_dev = dev_get_priv(dev);

	gtr_dev->serdes = dev_remap_addr_name(dev, "serdes");
	if (!gtr_dev->serdes)
		return -EINVAL;

	gtr_dev->siou = dev_remap_addr_name(dev, "siou");
	if (!gtr_dev->siou)
		return -EINVAL;

	gtr_dev->dev = dev;

	return xpsgtr_get_ref_clocks(dev);
}

static const struct udevice_id xpsgtr_phy_ids[] = {
	{ .compatible = "xlnx,zynqmp-psgtr-v1.1", },
	{ }
};

static const struct phy_ops xpsgtr_phy_ops = {
	.init = xpsgtr_init,
	.of_xlate = xpsgtr_of_xlate,
	.power_on = xpsgtr_power_on,
};

U_BOOT_DRIVER(psgtr_phy) = {
	.name = "psgtr_phy",
	.id = UCLASS_PHY,
	.of_match = xpsgtr_phy_ids,
	.ops = &xpsgtr_phy_ops,
	.probe = xpsgtr_probe,
	.priv_auto = sizeof(struct xpsgtr_dev),
};