// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright Altera Corporation (C) 2012-2015
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include <hang.h>
#include "sequencer.h"

static const struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)
	(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static const struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs
	= (struct socfpga_sdr_rw_load_jump_manager *)
	(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static const struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static const struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)
	(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static const struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static const struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)
	(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static const struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static const struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & seq->skip_delay_mask)

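/*
 * A minimal sketch of how the macro above behaves, assuming
 * seq->skip_delay_mask is programmed to 0xff in normal operation and to 0
 * when CALIB_SKIP_DELAY_LOOPS is selected: in the first case the requested
 * counter value passes through unchanged, in the second every delay-loop
 * counter collapses to zero so the calibration delay loops are skipped.
 */
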
bool dram_is_ddr(const u8 ddr)
{
	const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config();
	const u8 type = (cfg->ctrl_cfg >> SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB) &
			SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK;

	if (ddr == 2 && type == 1)	/* DDR2 */
		return true;

	if (ddr == 3 && type == 2)	/* DDR3 */
		return true;

	return false;
}

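/*
 * Example usage: dram_is_ddr(3) reports whether the controller was
 * configured for DDR3 (MEMTYPE field == 2); dram_is_ddr(2) does the same
 * for DDR2 (MEMTYPE field == 1). Any other combination returns false.
 */
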
static void set_failing_group_stage(struct socfpga_sdrseq *seq,
				    u32 group, u32 stage, u32 substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (seq->gbl.error_stage == CAL_STAGE_NIL) {
		seq->gbl.error_substage = substage;
		seq->gbl.error_stage = stage;
		seq->gbl.error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}

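/*
 * From the three helpers above, the cur_stage register layout is:
 * bits [7:0] hold the current stage, bits [15:8] the current substage
 * and bits [31:16] the current group.
 */
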
/**
 * phy_mgr_initialize() - Initialize PHY Manager
 *
 * Initialize PHY Manager.
 */
static void phy_mgr_initialize(struct socfpga_sdrseq *seq)
{
	u32 ratio;

	debug("%s:%d\n", __func__, __LINE__);
	/* Calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable, we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	/* Init params only if we do NOT skip calibration. */
	if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
		return;

	ratio = seq->rwcfg->mem_dq_per_read_dqs /
		seq->rwcfg->mem_virtual_groups_per_read_dqs;
	seq->param.read_correct_mask_vg = (1 << ratio) - 1;
	seq->param.write_correct_mask_vg = (1 << ratio) - 1;
	seq->param.read_correct_mask = (1 << seq->rwcfg->mem_dq_per_read_dqs)
				       - 1;
	seq->param.write_correct_mask = (1 << seq->rwcfg->mem_dq_per_write_dqs)
					- 1;
}

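/*
 * For example (values assumed for illustration only): with
 * mem_dq_per_read_dqs = 8 and one virtual group per read DQS, ratio is 8,
 * so both read_correct_mask_vg and read_correct_mask evaluate to 0xff,
 * i.e. one "pass" bit per DQ pin.
 */
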
/**
 * set_rank_and_odt_mask() - Set Rank and ODT mask
 * @rank:	Rank mask
 * @odt_mode:	ODT mode, OFF or READ_WRITE
 *
 * Set Rank and ODT mask (On-Die Termination).
 */
static void set_rank_and_odt_mask(struct socfpga_sdrseq *seq,
				  const u32 rank, const u32 odt_mode)
{
	u32 odt_mask_0 = 0;
	u32 odt_mask_1 = 0;
	u32 cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_OFF) {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	} else {	/* RW_MGR_ODT_MODE_READ_WRITE */
		switch (seq->rwcfg->mem_number_of_ranks) {
		case 1:	/* 1 Rank */
			/* Read: ODT = 0 ; Write: ODT = 1 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
			break;
		case 2:	/* 2 Ranks */
			if (seq->rwcfg->mem_number_of_cs_per_dimm == 1) {
				/*
				 * - Dual-Slot, Single-Rank (1 CS per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
				 *
				 * Since MEM_NUMBER_OF_RANKS is 2, they
				 * are both single rank with 2 CS each
				 * (special for RDIMM).
				 *
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
				if (dram_is_ddr(2))
					odt_mask_1 &= ~(1 << rank);
			} else {
				/*
				 * - Single-Slot, Dual-Rank (2 CS per DIMM)
				 *
				 * Read: Turn ODT off on all ranks
				 * Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
			break;
		case 4:	/* 4 Ranks */
			/*
			 * DDR3 Read, DDR2 Read/Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 *           +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * DDR3 Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x4;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x8;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x1;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				if (dram_is_ddr(2))
					odt_mask_1 = 0x2;
				else if (dram_is_ddr(3))
					odt_mask_1 = 0xA;
				break;
			}
			break;
		}
	}

	cs_and_odt_mask = (0xFF & ~(1 << rank)) |
			  ((0xFF & odt_mask_0) << 8) |
			  ((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}

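/*
 * The mask written above packs three fields (derived from the code, as an
 * illustration): bits [7:0] are the active-low chip-select mask for the
 * target rank, bits [15:8] carry odt_mask_0 (read ODT) and bits [23:16]
 * carry odt_mask_1 (write ODT). E.g. rank 0 of a 4-rank DDR3 setup in
 * RW_MGR_ODT_MODE_READ_WRITE yields 0x0504FE.
 */
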
/**
 * scc_mgr_set() - Set SCC Manager register
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register.
 */
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}

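/*
 * Each group's register is one 32-bit word apart, hence the (grp << 2)
 * byte offset above; e.g. scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, 3, p)
 * writes p at SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_PHASE_OFFSET | 0xc.
 */
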
/**
 * scc_mgr_initialize() - Initialize SCC Manager registers
 *
 * Initialize SCC Manager registers.
 */
static void scc_mgr_initialize(void)
{
	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	int i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
	}
}

static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
{
	scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(struct socfpga_sdrseq *seq,
					u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_in_delay(struct socfpga_sdrseq *seq, u32 dm,
				    u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(struct socfpga_sdrseq *seq,
				       u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs, delay);
}

static void scc_mgr_set_dm_out1_delay(struct socfpga_sdrseq *seq, u32 dm,
				      u32 delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
		    seq->rwcfg->mem_dq_per_write_dqs + 1 + dm,
		    delay);
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(u32 dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(u32 dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(u32 dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
 * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
 * @off:	Base offset in SCC Manager space
 * @grp:	Read/Write group
 * @val:	Value to be set
 * @update:	If non-zero, trigger SCC Manager update for all ranks
 *
 * This function sets the SCC Manager (Scan Chain Control Manager) register
 * and optionally triggers the SCC update for all ranks.
 */
static void scc_mgr_set_all_ranks(struct socfpga_sdrseq *seq,
				  const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);

		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_phase_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_PHASE_OFFSET,
			      read_group, phase, 0);
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(struct socfpga_sdrseq *seq,
						     u32 write_group, u32 phase)
{
	/*
	 * USER although the h/w doesn't support different phases per
	 * shadow register, for simplicity our scc manager modeling
	 * keeps different phase settings per shadow reg, and it's
	 * important for us to keep them in sync to match h/w.
	 * for efficiency, the scan chain update should occur only
	 * once to sr0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
			      write_group, phase, 0);
}

static void scc_mgr_set_dqs_en_delay_all_ranks(struct socfpga_sdrseq *seq,
					       u32 read_group, u32 delay)
{
	/*
	 * In shadow register mode, the T11 settings are stored in
	 * registers in the core, which are updated by the DQS_ENA
	 * signals. Not issuing the SCC_MGR_UPD command allows us to
	 * save lots of rank switching overhead, by calling
	 * select_shadow_regs_for_update with update_scan_chains
	 * set to 0.
	 */
	scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_DELAY_OFFSET,
			      read_group, delay, 1);
}

/**
 * scc_mgr_set_oct_out1_delay() - Set OCT output delay
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * This function sets the OCT output delay in SCC manager.
 */
static void scc_mgr_set_oct_out1_delay(struct socfpga_sdrseq *seq,
				       const u32 write_group, const u32 delay)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}

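/*
 * For example (an illustrative configuration, not a statement about any
 * particular device): with mem_if_read_dqs_width = 8 and
 * mem_if_write_dqs_width = 4, ratio is 2, so write group 3 maps to read
 * groups 6 and 7 and the delay is written to both.
 */
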
/**
 * scc_mgr_set_hhp_extras() - Set HHP extras.
 *
 * Load the fixed setting in the SCC manager HHP extras.
 */
static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- DQS bypass
	 * bits: 1:1 = 1'b1	- DQ bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
			  (1 << 2) | (1 << 1) | (1 << 0);
	const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
			 SCC_MGR_HHP_GLOBALS_OFFSET |
			 SCC_MGR_HHP_EXTRAS_OFFSET;

	debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n",
		   __func__, __LINE__);
	writel(value, addr);
	debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n",
		   __func__, __LINE__);
}

/**
 * scc_mgr_zero_all() - Zero all DQS config
 *
 * Zero all DQS config.
 */
static void scc_mgr_zero_all(struct socfpga_sdrseq *seq)
{
	int i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i,
						     seq->iocfg->dqs_in_reserve);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* Arria V/Cyclone V don't have out2. */
			scc_mgr_set_oct_out1_delay(seq, i,
						   seq->iocfg->dqs_out_reserve);
		}
	}

	/* Multicast to all DQS group enables. */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_set_bypass_mode() - Set bypass mode and trigger SCC update
 * @write_group:	Write group
 *
 * Set bypass mode and trigger SCC update.
 */
static void scc_set_bypass_mode(const u32 write_group)
{
	/* Multicast to all DQ enables. */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* Update current DQS IO enable. */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* Update the DQS logic. */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* Hit update. */
	writel(0, &sdr_scc_mgr->update);
}

/**
 * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
 * @write_group:	Write group
 *
 * Load DQS settings for Write Group, do not trigger SCC update.
 */
static void scc_mgr_load_dqs_for_write_group(struct socfpga_sdrseq *seq,
					     const u32 write_group)
{
	const int ratio = seq->rwcfg->mem_if_read_dqs_width /
			  seq->rwcfg->mem_if_write_dqs_width;
	const int base = write_group * ratio;
	int i;
	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}

/**
 * scc_mgr_zero_group() - Zero all configs for a group
 * @write_group:	Write group
 * @out_only:		If non-zero, only zero the output-side settings
 *
 * Zero DQ, DM, DQS and OCT configs for a group.
 */
static void scc_mgr_zero_group(struct socfpga_sdrseq *seq,
			       const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* Multicast to all DQ enables. */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings. */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
			if (!out_only)
				scc_mgr_set_dm_in_delay(seq, i, 0);
			scc_mgr_set_dm_out1_delay(seq, i, 0);
		}

		/* Multicast to all DM enables. */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* Zero all DQS IO settings. */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(seq, 0);

		/* Arria V/Cyclone V don't have out2. */
		scc_mgr_set_dqs_out1_delay(seq, seq->iocfg->dqs_out_reserve);
		scc_mgr_set_oct_out1_delay(seq, write_group,
					   seq->iocfg->dqs_out_reserve);
		scc_mgr_load_dqs_for_write_group(seq, write_group);

		/* Multicast to all DQS IO enables (only 1 in total). */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* Hit update to zero everything. */
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(struct socfpga_sdrseq *seq,
					    u32 group_bgn, u32 delay)
{
	u32 i, p;

	for (i = 0, p = group_bgn; i < seq->rwcfg->mem_dq_per_read_dqs;
	     i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/**
 * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the
 * DQ pins in a group
 * @delay:		Delay value
 *
 * Apply and load a particular output delay for the DQ pins in a group.
 */
static void scc_mgr_apply_group_dq_out1_delay(struct socfpga_sdrseq *seq,
					      const u32 delay)
{
	int i;

	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
		scc_mgr_set_dq_out1_delay(i, delay);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(struct socfpga_sdrseq *seq,
					      u32 delay1)
{
	u32 i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(seq, i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(struct socfpga_sdrseq *seq,
						    u32 write_group, u32 delay)
{
	scc_mgr_set_dqs_out1_delay(seq, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(seq, write_group, delay);
	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output
 * side: DQ, DM, DQS, OCT
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side: DQ, DM, DQS, OCT.
 */
static void scc_mgr_apply_group_all_out_delay_add(struct socfpga_sdrseq *seq,
						  const u32 write_group,
						  const u32 delay)
{
	u32 i, new_delay;

	/* DQ shift */
	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++)
		scc_mgr_load_dq(i);

	/* DM shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
		scc_mgr_load_dm(i);

	/* DQS shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay, new_delay,
			   seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_dqs_out1_delay(seq, new_delay);
	}

	scc_mgr_load_dqs_io();

	/* OCT shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
	if (new_delay > seq->iocfg->io_out2_delay_max) {
		debug_cond(DLEVEL >= 1,
			   "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
			   __func__, __LINE__, write_group, delay,
			   new_delay, seq->iocfg->io_out2_delay_max,
			   new_delay - seq->iocfg->io_out2_delay_max);
		new_delay -= seq->iocfg->io_out2_delay_max;
		scc_mgr_set_oct_out1_delay(seq, write_group, new_delay);
	}

	scc_mgr_load_dqs_for_write_group(seq, write_group);
}

/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the
 * entire output side to all ranks
 * @write_group:	Write group
 * @delay:		Delay value
 *
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(struct socfpga_sdrseq *seq,
						const u32 write_group,
						const u32 delay)
{
	int r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(seq, write_group, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/**
 * set_jump_as_return() - Return instruction optimization
 *
 * Optimization used to recover some slots in the ddr3 inst_rom; it could
 * be applied to other protocols if we wanted to.
 */
static void set_jump_as_return(struct socfpga_sdrseq *seq)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, so we set the counter to a large value so that
	 * we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(seq->rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/**
 * delay_for_n_mem_clocks() - Delay for N memory clocks
 * @clocks:	Length of the delay
 *
 * Delay for N memory clocks.
 */
static void delay_for_n_mem_clocks(struct socfpga_sdrseq *seq,
				   const u32 clocks)
{
	u32 afi_clocks;
	u16 c_loop;
	u8 inner;
	u8 outer;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* Scale (rounding up) to get afi clocks. */
	afi_clocks = DIV_ROUND_UP(clocks, seq->misccfg->afi_rate_ratio);
	if (afi_clocks)	/* Temporary underflow protection */
		afi_clocks--;

	/*
	 * Note, we don't bother accounting for being off a little
	 * bit because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test
	 * before the decrement, and so always perform the loop
	 * 1 time more than the counter value
	 */
	c_loop = afi_clocks >> 16;
	outer = c_loop ? 0xff : (afi_clocks >> 8);
	inner = outer ? 0xff : afi_clocks;

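	/*
	 * Worked example (illustrative numbers): afi_clocks = 0x1234 gives
	 * c_loop = 0, outer = 0x12, inner = 0xff, i.e. a nested loop;
	 * afi_clocks = 0x50 gives c_loop = 0, outer = 0, inner = 0x50,
	 * i.e. the single-loop case below.
	 */
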
	/*
	 * rom instructions are structured as follows:
	 *
	 * IDLE_LOOP2: jnz cntr0, TARGET_A
	 * IDLE_LOOP1: jnz cntr1, TARGET_B
	 *             return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
	 *
	 * a little confusing, but it helps save precious space in the inst_rom
	 * and sequencer rom and keeps the delays more accurate and reduces
	 * overhead
	 */
	if (afi_clocks < 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(seq->rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(seq->rwcfg->idle_loop2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		do {
			writel(seq->rwcfg->idle_loop2,
			       SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} while (c_loop-- != 0);
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}

static void delay_for_n_ns(struct socfpga_sdrseq *seq, const u32 ns)
{
	delay_for_n_mem_clocks(seq, (ns * seq->misccfg->afi_clk_freq *
				seq->misccfg->afi_rate_ratio) / 1000);
}

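/*
 * A quick sanity check of the conversion above, assuming afi_clk_freq is
 * expressed in MHz: ns * MHz yields clock cycles * 1000, so dividing by
 * 1000 and scaling by afi_rate_ratio converts a nanosecond delay into the
 * equivalent number of memory clocks. E.g. 200 ns with a 200 MHz AFI clock
 * and ratio 2 requests 200 * 200 * 2 / 1000 = 80 memory clocks.
 */
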
/**
 * rw_mgr_mem_init_load_regs() - Load instruction registers
 * @cntr0:	Counter 0 value
 * @cntr1:	Counter 1 value
 * @cntr2:	Counter 2 value
 * @jump:	Jump instruction value
 *
 * Load instruction registers.
 */
static void rw_mgr_mem_init_load_regs(struct socfpga_sdrseq *seq,
				      u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(jump, grpaddr);
}

/**
 * rw_mgr_mem_load_user_ddr2() - Load user calibration values for DDR2
 * @handoff:	Indicate whether this is initialization or handoff phase
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr2(struct socfpga_sdrseq *seq,
				      const int handoff)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->emr2, grpaddr);
		writel(seq->rwcfg->emr3, grpaddr);
		writel(seq->rwcfg->emr, grpaddr);

		if (handoff) {
			writel(seq->rwcfg->mr_user, grpaddr);
			continue;
		}

		writel(seq->rwcfg->mr_dll_reset, grpaddr);

		writel(seq->rwcfg->precharge_all, grpaddr);

		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);
		writel(seq->rwcfg->refresh, grpaddr);
		delay_for_n_ns(seq, 200);

		writel(seq->rwcfg->mr_calib, grpaddr);
		writel(/*seq->rwcfg->*/0x0b, grpaddr);	/* EMR_OCD_ENABLE */
		writel(seq->rwcfg->emr, grpaddr);
		delay_for_n_mem_clocks(seq, 200);
	}
}

/**
 * rw_mgr_mem_load_user_ddr3() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user_ddr3(struct socfpga_sdrseq *seq,
				      const u32 fin1, const u32 fin2,
				      const int precharge)
{
	u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
		      RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	u32 r;

	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
		/* set rank */
		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		if (precharge)
			writel(seq->rwcfg->precharge_all, grpaddr);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((seq->rwcfg->mem_address_mirroring >> r) & 0x1) {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1_mirr, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(fin1, grpaddr);
		} else {
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs2, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs3, grpaddr);
			delay_for_n_mem_clocks(seq, 4);
			set_jump_as_return(seq);
			writel(seq->rwcfg->mrs1, grpaddr);
			set_jump_as_return(seq);
			writel(fin2, grpaddr);
		}

		if (precharge)
			continue;

		set_jump_as_return(seq);
		writel(seq->rwcfg->zqcl, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(seq, 512);
	}
}

/**
 * rw_mgr_mem_load_user() - Load user calibration values
 * @fin1:	Final instruction 1
 * @fin2:	Final instruction 2
 * @precharge:	If 1, precharge the banks at the end
 *
 * Load user calibration values and optionally precharge the banks.
 */
static void rw_mgr_mem_load_user(struct socfpga_sdrseq *seq,
				 const u32 fin1, const u32 fin2,
				 const int precharge)
{
	if (dram_is_ddr(2))
		rw_mgr_mem_load_user_ddr2(seq, precharge);
	else if (dram_is_ddr(3))
		rw_mgr_mem_load_user_ddr3(seq, fin1, fin2, precharge);
	else
		hang();
}

/**
 * rw_mgr_mem_initialize() - Initialize RW Manager
 *
 * Initialize RW Manager.
 */
static void rw_mgr_mem_initialize(struct socfpga_sdrseq *seq)
{
	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	if (dram_is_ddr(3)) {
		writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
	}

	/*
	 * Here's how you load a register for a loop:
	 * Counters are located @ 0x800
	 * Jump addresses are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but the Avalon bus throws away the 2
	 * least significant bits
	 */

	/* Start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A
	 */
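	/*
	 * Arithmetic check of the chosen constants: ((2 + 0) * 256 + 2) * 106
	 * = 54,484 cycles, i.e. about 204 us at 3.75 ns, which satisfies the
	 * 200 us tINIT requirement.
	 */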
	rw_mgr_mem_init_load_regs(seq, seq->misccfg->tinit_cntr0_val,
				  seq->misccfg->tinit_cntr1_val,
				  seq->misccfg->tinit_cntr2_val,
				  seq->rwcfg->init_reset_0_cke_0);

	/* Indicate that memory is stable. */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	if (dram_is_ddr(2)) {
		writel(seq->rwcfg->nop, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					RW_MGR_RUN_SINGLE_GROUP_OFFSET);

		/* Bring up clock enable. */

		/* tXRP < 400 ck cycles */
		delay_for_n_ns(seq, 400);
	} else if (dram_is_ddr(3)) {
		/*
		 * transition the RESET to high
		 * Wait for 500us
		 */

		/*
		 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
		 * If a and b are the number of iterations in 2 nested loops
		 * it takes the following number of cycles to complete the
		 * operation: number_of_cycles = ((2 + n) * a + 2) * b
		 * where n is the number of instructions in the inner loop
		 * One possible solution is
		 * n = 2 , a = 131 , b = 256 => a = 83, b = FF
		 */
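		/*
		 * Arithmetic check: ((2 + 2) * 131 + 2) * 256 = 134,656
		 * cycles, i.e. about 505 us at 3.75 ns, which satisfies the
		 * 500 us reset requirement.
		 */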
		rw_mgr_mem_init_load_regs(seq, seq->misccfg->treset_cntr0_val,
					  seq->misccfg->treset_cntr1_val,
					  seq->misccfg->treset_cntr2_val,
					  seq->rwcfg->init_reset_1_cke_0);
		/* Bring up clock enable. */

		/* tXRP < 250 ck cycles */
		delay_for_n_mem_clocks(seq, 250);
	}

	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_dll_reset_mirr,
			     seq->rwcfg->mrs0_dll_reset, 0);
}

/**
 * rw_mgr_mem_handoff() - Hand off the memory to user
 *
 * At the end of calibration we have to program the user settings in
 * and hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(struct socfpga_sdrseq *seq)
{
	rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_user_mirr,
			     seq->rwcfg->mrs0_user, 1);
	/*
	 * Need to wait tMOD (12CK or 15ns) time before issuing other
	 * commands, but we will have plenty of NIOS cycles before actual
	 * handoff so it's okay.
	 */
}

/**
 * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
 * @group:	Write Group
 * @test_dm:	Use DM
 *
 * Issue write test command. Two variants are provided, one that just tests
 * a write pattern and another that tests datamask functionality.
 */
static void rw_mgr_mem_calibrate_write_test_issue(struct socfpga_sdrseq *seq,
						  u32 group, u32 test_dm)
{
	const u32 quick_write_mode =
		(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
		seq->misccfg->enable_super_quick_calibration;
	u32 mcc_instruction;
	u32 rw_wl_nop_cycles;

1156 /*
1157 * Set counter and jump addresses for the right
1158 * number of NOP cycles.
1159 * The number of supported NOP cycles can range from -1 to infinity
1160 * Three different cases are handled:
1161 *
1162 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
1163 * mechanism will be used to insert the right number of NOPs
1164 *
1165 * 2. For a number of NOP cycles equal to 0, the micro-instruction
1166 * issuing the write command will jump straight to the
1167 * micro-instruction that turns on DQS (for DDRx), or outputs write
1168 * data (for RLD), skipping
1169 * the NOP micro-instruction altogether.
1170 *
1171 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
1172 * turned on in the same micro-instruction that issues the write
1173 * command. Then we need
1174 * to directly jump to the micro-instruction that sends out the data
1175 *
1176 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
1177 * (2 and 3). One jump-counter (0) is used to perform multiple
1178 * write-read operations. This leaves one counter to issue this
1179 * command in "multiple-group" mode.
1180 */
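	/*
	 * Illustrative example: with rw_wl_nop_cycles == 4, the third case
	 * below loads CNTR 3 with 3, so the NOP micro-instruction is
	 * repeated four times before the write sequence proceeds.
	 */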
1181
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001182 rw_wl_nop_cycles = seq->gbl.rw_wl_nop_cycles;
Marek Vasutad64769c2015-07-21 05:43:37 +02001183
1184 if (rw_wl_nop_cycles == -1) {
1185 /*
1186 * CNTR 2 - We want to execute the special write operation that
1187 * turns on DQS right away and then skip directly to the
1188 * instruction that sends out the data. We set the counter to a
1189 * large number so that the jump is always taken.
1190 */
1191 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1192
1193 /* CNTR 3 - Not used */
1194 if (test_dm) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001195 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
1196 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_data,
Marek Vasutad64769c2015-07-21 05:43:37 +02001197 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001198 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
Marek Vasutad64769c2015-07-21 05:43:37 +02001199 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
1200 } else {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001201 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0_wl_1;
1202 writel(seq->rwcfg->lfsr_wr_rd_bank_0_data,
Marek Vasut139823e2015-08-02 19:47:01 +02001203 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001204 writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
Marek Vasut139823e2015-08-02 19:47:01 +02001205 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasutad64769c2015-07-21 05:43:37 +02001206 }
1207 } else if (rw_wl_nop_cycles == 0) {
1208 /*
1209 * CNTR 2 - We want to skip the NOP operation and go straight
1210 * to the DQS enable instruction. We set the counter to a large
1211 * number so that the jump is always taken.
1212 */
1213 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
1214
1215 /* CNTR 3 - Not used */
1216 if (test_dm) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001217 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
1218 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
Marek Vasutad64769c2015-07-21 05:43:37 +02001219 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1220 } else {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001221 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
1222 writel(seq->rwcfg->lfsr_wr_rd_bank_0_dqs,
Marek Vasut139823e2015-08-02 19:47:01 +02001223 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Marek Vasutad64769c2015-07-21 05:43:37 +02001224 }
1225 } else {
1226 /*
1227 * CNTR 2 - In this case we want to execute the next instruction
1228 * and NOT take the jump. So we set the counter to 0. The jump
1229 * address does not matter.
1230 */
1231 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
1232 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
1233
1234 /*
1235 * CNTR 3 - Set the nop counter to the number of cycles we
1236 * need to loop for, minus 1.
1237 */
1238 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
1239 if (test_dm) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001240 mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0;
1241 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop,
Marek Vasut139823e2015-08-02 19:47:01 +02001242 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasutad64769c2015-07-21 05:43:37 +02001243 } else {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001244 mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0;
1245 writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop,
Marek Vasut139823e2015-08-02 19:47:01 +02001246 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Marek Vasutad64769c2015-07-21 05:43:37 +02001247 }
1248 }
1249
1250 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1251 RW_MGR_RESET_READ_DATAPATH_OFFSET);
1252
1253 if (quick_write_mode)
1254 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
1255 else
1256 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
1257
1258 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
1259
1260 /*
1261 * CNTR 1 - This is used to ensure enough time elapses
1262 * for read data to come back.
1263 */
1264 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
1265
1266 if (test_dm) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001267 writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_wait,
Marek Vasut139823e2015-08-02 19:47:01 +02001268 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Marek Vasutad64769c2015-07-21 05:43:37 +02001269 } else {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001270 writel(seq->rwcfg->lfsr_wr_rd_bank_0_wait,
Marek Vasut139823e2015-08-02 19:47:01 +02001271 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Marek Vasutad64769c2015-07-21 05:43:37 +02001272 }
1273
Marek Vasut8371c2e2015-07-21 06:00:36 +02001274 writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
1275 RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
1276 (group << 2));
Marek Vasutad64769c2015-07-21 05:43:37 +02001277}
1278
Marek Vasut4a82854b2015-07-21 05:57:11 +02001279/**
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001280 * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple
1281 * pass
Marek Vasut4a82854b2015-07-21 05:57:11 +02001282 * @rank_bgn: Rank number
1283 * @write_group: Write Group
1284 * @use_dm: Use DM
1285 * @all_correct: All bits must be correct in the mask
1286 * @bit_chk: Resulting bit mask after the test
1287 * @all_ranks: Test all ranks
1288 *
1289 * Test writes, can check for a single bit pass or multiple bit pass.
1290 */
Marek Vasutb9452ea2015-07-21 05:54:39 +02001291static int
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001292rw_mgr_mem_calibrate_write_test(struct socfpga_sdrseq *seq,
1293 const u32 rank_bgn, const u32 write_group,
Marek Vasutb9452ea2015-07-21 05:54:39 +02001294 const u32 use_dm, const u32 all_correct,
1295 u32 *bit_chk, const u32 all_ranks)
Marek Vasutad64769c2015-07-21 05:43:37 +02001296{
Marek Vasutb9452ea2015-07-21 05:54:39 +02001297 const u32 rank_end = all_ranks ?
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001298 seq->rwcfg->mem_number_of_ranks :
Marek Vasutb9452ea2015-07-21 05:54:39 +02001299 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001300 const u32 shift_ratio = seq->rwcfg->mem_dq_per_write_dqs /
1301 seq->rwcfg->mem_virtual_groups_per_write_dqs;
1302 const u32 correct_mask_vg = seq->param.write_correct_mask_vg;
Marek Vasutb9452ea2015-07-21 05:54:39 +02001303
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001304 u32 tmp_bit_chk, base_rw_mgr, group;
Marek Vasutb9452ea2015-07-21 05:54:39 +02001305 int vg, r;
Marek Vasutad64769c2015-07-21 05:43:37 +02001306
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001307 *bit_chk = seq->param.write_correct_mask;
Marek Vasutad64769c2015-07-21 05:43:37 +02001308
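	/*
	 * Illustrative note (hypothetical widths): with 8 DQ bits per
	 * write DQS and 2 virtual groups, shift_ratio is 4, so each pass
	 * of the vg loop below adds a 4-bit slice of per-bit pass results
	 * to tmp_bit_chk, the highest-numbered virtual group ending up in
	 * the most significant slice.
	 */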
1309 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasutb9452ea2015-07-21 05:54:39 +02001310 /* Set rank */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001311 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Marek Vasutad64769c2015-07-21 05:43:37 +02001312
1313 tmp_bit_chk = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001314 for (vg = seq->rwcfg->mem_virtual_groups_per_write_dqs - 1;
Marek Vasutb9452ea2015-07-21 05:54:39 +02001315 vg >= 0; vg--) {
1316 /* Reset the FIFOs to get pointers to known state. */
Marek Vasutad64769c2015-07-21 05:43:37 +02001317 writel(0, &phy_mgr_cmd->fifo_reset);
1318
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001319 group = write_group *
1320 seq->rwcfg->mem_virtual_groups_per_write_dqs
1321 + vg;
1322 rw_mgr_mem_calibrate_write_test_issue(seq, group,
1323 use_dm);
Marek Vasutad64769c2015-07-21 05:43:37 +02001324
Marek Vasutb9452ea2015-07-21 05:54:39 +02001325 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
1326 tmp_bit_chk <<= shift_ratio;
1327 tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
Marek Vasutad64769c2015-07-21 05:43:37 +02001328 }
Marek Vasutb9452ea2015-07-21 05:54:39 +02001329
Marek Vasutad64769c2015-07-21 05:43:37 +02001330 *bit_chk &= tmp_bit_chk;
1331 }
1332
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001333 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasutad64769c2015-07-21 05:43:37 +02001334 if (all_correct) {
Marek Vasutea9aa242016-04-04 21:21:05 +02001335 debug_cond(DLEVEL >= 2,
Marek Vasutb9452ea2015-07-21 05:54:39 +02001336 "write_test(%u,%u,ALL) : %u == %u => %i\n",
1337 write_group, use_dm, *bit_chk,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001338 seq->param.write_correct_mask,
1339 *bit_chk == seq->param.write_correct_mask);
1340 return *bit_chk == seq->param.write_correct_mask;
Marek Vasutad64769c2015-07-21 05:43:37 +02001341 } else {
Marek Vasutea9aa242016-04-04 21:21:05 +02001342 debug_cond(DLEVEL >= 2,
Marek Vasutb9452ea2015-07-21 05:54:39 +02001343 "write_test(%u,%u,ONE) : %u != %i => %i\n",
1344 write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
Marek Vasutad64769c2015-07-21 05:43:37 +02001345 return *bit_chk != 0x00;
1346 }
1347}
1348
Marek Vasutd844c7d2015-07-18 03:55:07 +02001349/**
1350 * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
1351 * @rank_bgn: Rank number
1352 * @group: Read/Write Group
1353 * @all_ranks: Test all ranks
1354 *
1355 * Performs a guaranteed read on the patterns we are going to use during a
1356 * read test to ensure memory works.
Dinh Nguyen3da42852015-06-02 22:52:49 -05001357 */
Marek Vasutd844c7d2015-07-18 03:55:07 +02001358static int
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001359rw_mgr_mem_calibrate_read_test_patterns(struct socfpga_sdrseq *seq,
1360 const u32 rank_bgn, const u32 group,
Marek Vasutd844c7d2015-07-18 03:55:07 +02001361 const u32 all_ranks)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001362{
Marek Vasutd844c7d2015-07-18 03:55:07 +02001363 const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1364 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1365 const u32 addr_offset =
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001366 (group * seq->rwcfg->mem_virtual_groups_per_read_dqs)
1367 << 2;
Marek Vasutd844c7d2015-07-18 03:55:07 +02001368 const u32 rank_end = all_ranks ?
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001369 seq->rwcfg->mem_number_of_ranks :
Marek Vasutd844c7d2015-07-18 03:55:07 +02001370 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001371 const u32 shift_ratio = seq->rwcfg->mem_dq_per_read_dqs /
1372 seq->rwcfg->mem_virtual_groups_per_read_dqs;
1373 const u32 correct_mask_vg = seq->param.read_correct_mask_vg;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001374
Marek Vasutd844c7d2015-07-18 03:55:07 +02001375 u32 tmp_bit_chk, base_rw_mgr, bit_chk;
1376 int vg, r;
1377 int ret = 0;
1378
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001379 bit_chk = seq->param.read_correct_mask;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001380
1381 for (r = rank_bgn; r < rank_end; r++) {
Marek Vasutd844c7d2015-07-18 03:55:07 +02001382 /* Set rank */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001383 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001384
1385 /* Load up a constant burst of read commands */
Marek Vasut1273dd92015-07-12 21:05:08 +02001386 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001387 writel(seq->rwcfg->guaranteed_read,
Marek Vasut139823e2015-08-02 19:47:01 +02001388 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001389
Marek Vasut1273dd92015-07-12 21:05:08 +02001390 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001391 writel(seq->rwcfg->guaranteed_read_cont,
Marek Vasut139823e2015-08-02 19:47:01 +02001392 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001393
1394 tmp_bit_chk = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001395 for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
Marek Vasutd844c7d2015-07-18 03:55:07 +02001396 vg >= 0; vg--) {
1397 /* Reset the FIFOs to get pointers to known state. */
Marek Vasut1273dd92015-07-12 21:05:08 +02001398 writel(0, &phy_mgr_cmd->fifo_reset);
1399 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1400 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001401 writel(seq->rwcfg->guaranteed_read,
Marek Vasutd844c7d2015-07-18 03:55:07 +02001402 addr + addr_offset + (vg << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001403
Marek Vasut1273dd92015-07-12 21:05:08 +02001404 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Marek Vasutd844c7d2015-07-18 03:55:07 +02001405 tmp_bit_chk <<= shift_ratio;
1406 tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001407 }
Marek Vasutd844c7d2015-07-18 03:55:07 +02001408
1409 bit_chk &= tmp_bit_chk;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001410 }
1411
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001412 writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001413
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001414 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasutd844c7d2015-07-18 03:55:07 +02001415
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001416 if (bit_chk != seq->param.read_correct_mask)
Marek Vasutd844c7d2015-07-18 03:55:07 +02001417 ret = -EIO;
1418
Marek Vasutea9aa242016-04-04 21:21:05 +02001419 debug_cond(DLEVEL >= 1,
Marek Vasutd844c7d2015-07-18 03:55:07 +02001420 "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
1421 __func__, __LINE__, group, bit_chk,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001422 seq->param.read_correct_mask, ret);
Marek Vasutd844c7d2015-07-18 03:55:07 +02001423
1424 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001425}
1426
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001427/**
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001428 * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read
1429 * test
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001430 * @rank_bgn: Rank number
1431 * @all_ranks: Test all ranks
1432 *
1433 * Load up the patterns we are going to use during a read test.
1434 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001435static void rw_mgr_mem_calibrate_read_load_patterns(struct socfpga_sdrseq *seq,
1436 const u32 rank_bgn,
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001437 const int all_ranks)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001438{
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001439 const u32 rank_end = all_ranks ?
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001440 seq->rwcfg->mem_number_of_ranks :
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001441 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
1442 u32 r;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001443
1444 debug("%s:%d\n", __func__, __LINE__);
Marek Vasutb6cb7f92015-07-18 03:34:22 +02001445
Dinh Nguyen3da42852015-06-02 22:52:49 -05001446 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001447 /* set rank */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001448 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001449
1450 /* Load up a constant burst */
Marek Vasut1273dd92015-07-12 21:05:08 +02001451 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001452
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001453 writel(seq->rwcfg->guaranteed_write_wait0,
Marek Vasut139823e2015-08-02 19:47:01 +02001454 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001455
Marek Vasut1273dd92015-07-12 21:05:08 +02001456 writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001457
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001458 writel(seq->rwcfg->guaranteed_write_wait1,
Marek Vasut139823e2015-08-02 19:47:01 +02001459 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001460
Marek Vasut1273dd92015-07-12 21:05:08 +02001461 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001462
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001463 writel(seq->rwcfg->guaranteed_write_wait2,
Marek Vasut139823e2015-08-02 19:47:01 +02001464 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001465
Marek Vasut1273dd92015-07-12 21:05:08 +02001466 writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001467
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001468 writel(seq->rwcfg->guaranteed_write_wait3,
Marek Vasut139823e2015-08-02 19:47:01 +02001469 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001470
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001471 writel(seq->rwcfg->guaranteed_write,
1472 SDR_PHYGRP_RWMGRGRP_ADDRESS |
1473 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001474 }
1475
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001476 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001477}
1478
Marek Vasut783fcf52015-07-20 03:26:05 +02001479/**
1480 * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
1481 * @rank_bgn: Rank number
1482 * @group: Read/Write group
1483 * @num_tries: Number of retries of the test
1484 * @all_correct: All bits must be correct in the mask
1485 * @bit_chk: Resulting bit mask after the test
1486 * @all_groups: Test all R/W groups
1487 * @all_ranks: Test all ranks
1488 *
1489 * Try a read and see if it returns correct data. The test has dummy reads
1490 * inserted into the mix, used to align DQS enable, and performs more
1491 * thorough checks than the regular read test.
Dinh Nguyen3da42852015-06-02 22:52:49 -05001492 */
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001493static int
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001494rw_mgr_mem_calibrate_read_test(struct socfpga_sdrseq *seq,
1495 const u32 rank_bgn, const u32 group,
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001496 const u32 num_tries, const u32 all_correct,
1497 u32 *bit_chk,
1498 const u32 all_groups, const u32 all_ranks)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001499{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001500 const u32 rank_end = all_ranks ? seq->rwcfg->mem_number_of_ranks :
Dinh Nguyen3da42852015-06-02 22:52:49 -05001501 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001502 const u32 quick_read_mode =
1503 ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001504 seq->misccfg->enable_super_quick_calibration);
1505 u32 correct_mask_vg = seq->param.read_correct_mask_vg;
Marek Vasut3cb8bf32015-07-19 07:48:58 +02001506 u32 tmp_bit_chk;
1507 u32 base_rw_mgr;
1508 u32 addr;
1509
1510 int r, vg, ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001511
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001512 *bit_chk = seq->param.read_correct_mask;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001513
1514 for (r = rank_bgn; r < rank_end; r++) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001515 /* set rank */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001516 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001517
Marek Vasut1273dd92015-07-12 21:05:08 +02001518 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001519
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001520 writel(seq->rwcfg->read_b2b_wait1,
Marek Vasut139823e2015-08-02 19:47:01 +02001521 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001522
Marek Vasut1273dd92015-07-12 21:05:08 +02001523 writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001524 writel(seq->rwcfg->read_b2b_wait2,
Marek Vasut139823e2015-08-02 19:47:01 +02001525 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001526
Dinh Nguyen3da42852015-06-02 22:52:49 -05001527 if (quick_read_mode)
Marek Vasut1273dd92015-07-12 21:05:08 +02001528 writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001529 /* need at least two (1+1) reads to capture failures */
1530 else if (all_groups)
Marek Vasut1273dd92015-07-12 21:05:08 +02001531 writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001532 else
Marek Vasut1273dd92015-07-12 21:05:08 +02001533 writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
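		/*
		 * Note, inferred from the "1+1" comment above: the counter
		 * appears to encode the read count minus one, so 0x32
		 * requests 51 read loops here and 0x06 requests 7 in
		 * all-groups mode.
		 */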
Dinh Nguyen3da42852015-06-02 22:52:49 -05001534
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001535 writel(seq->rwcfg->read_b2b,
Marek Vasut139823e2015-08-02 19:47:01 +02001536 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001537 if (all_groups)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001538 writel(seq->rwcfg->mem_if_read_dqs_width *
1539 seq->rwcfg->mem_virtual_groups_per_read_dqs - 1,
Marek Vasut1273dd92015-07-12 21:05:08 +02001540 &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001541 else
Marek Vasut1273dd92015-07-12 21:05:08 +02001542 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001543
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001544 writel(seq->rwcfg->read_b2b,
Marek Vasut139823e2015-08-02 19:47:01 +02001545 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001546
1547 tmp_bit_chk = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001548 for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1;
1549 vg >= 0; vg--) {
Marek Vasutba522c72015-07-19 07:57:28 +02001550 /* Reset the FIFOs to get pointers to known state. */
Marek Vasut1273dd92015-07-12 21:05:08 +02001551 writel(0, &phy_mgr_cmd->fifo_reset);
1552 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
1553 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001554
Marek Vasutba522c72015-07-19 07:57:28 +02001555 if (all_groups) {
1556 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1557 RW_MGR_RUN_ALL_GROUPS_OFFSET;
1558 } else {
1559 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
1560 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
1561 }
Marek Vasutc4815f72015-07-12 19:03:33 +02001562
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001563 writel(seq->rwcfg->read_b2b, addr +
Marek Vasut139823e2015-08-02 19:47:01 +02001564 ((group *
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001565 seq->rwcfg->mem_virtual_groups_per_read_dqs +
Marek Vasut139823e2015-08-02 19:47:01 +02001566 vg) << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001567
Marek Vasut1273dd92015-07-12 21:05:08 +02001568 base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001569 tmp_bit_chk <<=
1570 seq->rwcfg->mem_dq_per_read_dqs /
1571 seq->rwcfg->mem_virtual_groups_per_read_dqs;
Marek Vasutba522c72015-07-19 07:57:28 +02001572 tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001573 }
Marek Vasut7ce23bb2015-07-19 07:51:17 +02001574
Dinh Nguyen3da42852015-06-02 22:52:49 -05001575 *bit_chk &= tmp_bit_chk;
1576 }
1577
Marek Vasutc4815f72015-07-12 19:03:33 +02001578 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001579 writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001580
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001581 set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF);
Marek Vasut3853d652015-07-19 07:44:21 +02001582
Dinh Nguyen3da42852015-06-02 22:52:49 -05001583 if (all_correct) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001584 ret = (*bit_chk == seq->param.read_correct_mask);
Marek Vasutea9aa242016-04-04 21:21:05 +02001585 debug_cond(DLEVEL >= 2,
Marek Vasut3853d652015-07-19 07:44:21 +02001586 "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
1587 __func__, __LINE__, group, all_groups, *bit_chk,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001588 seq->param.read_correct_mask, ret);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001589 } else {
Marek Vasut3853d652015-07-19 07:44:21 +02001590 ret = (*bit_chk != 0x00);
Marek Vasutea9aa242016-04-04 21:21:05 +02001591 debug_cond(DLEVEL >= 2,
Marek Vasut3853d652015-07-19 07:44:21 +02001592 "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
1593 __func__, __LINE__, group, all_groups, *bit_chk,
1594 0, ret);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001595 }
Marek Vasut3853d652015-07-19 07:44:21 +02001596
1597 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001598}
1599
Marek Vasut96df6032015-07-19 07:35:36 +02001600/**
1601 * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
1602 * @grp: Read/Write group
1603 * @num_tries: Number of retries of the test
1604 * @all_correct: All bits must be correct in the mask
1605 * @all_groups: Test all R/W groups
1606 *
1607 * Perform a READ test across all memory ranks.
1608 */
1609static int
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001610rw_mgr_mem_calibrate_read_test_all_ranks(struct socfpga_sdrseq *seq,
1611 const u32 grp, const u32 num_tries,
Marek Vasut96df6032015-07-19 07:35:36 +02001612 const u32 all_correct,
1613 const u32 all_groups)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001614{
Marek Vasut96df6032015-07-19 07:35:36 +02001615 u32 bit_chk;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001616 return rw_mgr_mem_calibrate_read_test(seq, 0, grp, num_tries,
1617 all_correct, &bit_chk, all_groups,
1618 1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001619}
1620
Marek Vasut60bb8a82015-07-19 06:25:27 +02001621/**
1622 * rw_mgr_incr_vfifo() - Increase VFIFO value
1623 * @grp: Read/Write group
Marek Vasut60bb8a82015-07-19 06:25:27 +02001624 *
1625 * Increase VFIFO value.
1626 */
Marek Vasut8c887b62015-07-19 06:37:51 +02001627static void rw_mgr_incr_vfifo(const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001628{
Marek Vasut1273dd92015-07-12 21:05:08 +02001629 writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001630}
1631
Marek Vasut60bb8a82015-07-19 06:25:27 +02001632/**
1633 * rw_mgr_decr_vfifo() - Decrease VFIFO value
1634 * @grp: Read/Write group
Marek Vasut60bb8a82015-07-19 06:25:27 +02001635 *
1636 * Decrease VFIFO value.
1637 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001638static void rw_mgr_decr_vfifo(struct socfpga_sdrseq *seq, const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001639{
Marek Vasut60bb8a82015-07-19 06:25:27 +02001640 u32 i;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001641
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001642 for (i = 0; i < seq->misccfg->read_valid_fifo_size - 1; i++)
Marek Vasut8c887b62015-07-19 06:37:51 +02001643 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001644}
1645
Marek Vasutd145ca92015-07-19 06:45:43 +02001646/**
1647 * find_vfifo_failing_read() - Push VFIFO to get a failing read
1648 * @grp: Read/Write group
1649 *
1650 * Push VFIFO until a failing read happens.
1651 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001652static int find_vfifo_failing_read(struct socfpga_sdrseq *seq,
1653 const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001654{
Marek Vasut96df6032015-07-19 07:35:36 +02001655 u32 v, ret, fail_cnt = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001656
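	/*
	 * Note: the loop below only returns a position after the read test
	 * has failed twice, so a single marginal failure is not enough to
	 * end the search.
	 */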
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001657 for (v = 0; v < seq->misccfg->read_valid_fifo_size; v++) {
Marek Vasutea9aa242016-04-04 21:21:05 +02001658 debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05001659 __func__, __LINE__, v);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001660 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1661 PASS_ONE_BIT, 0);
Marek Vasutd145ca92015-07-19 06:45:43 +02001662 if (!ret) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001663 fail_cnt++;
1664
1665 if (fail_cnt == 2)
Marek Vasutd145ca92015-07-19 06:45:43 +02001666 return v;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001667 }
1668
Marek Vasutd145ca92015-07-19 06:45:43 +02001669 /* Fiddle with FIFO. */
Marek Vasut8c887b62015-07-19 06:37:51 +02001670 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001671 }
1672
Marek Vasutd145ca92015-07-19 06:45:43 +02001673 /* No failing read found! Something must have gone wrong. */
Marek Vasutea9aa242016-04-04 21:21:05 +02001674 debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
Marek Vasutd145ca92015-07-19 06:45:43 +02001675 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001676}
1677
Marek Vasut192d6f92015-07-19 05:26:49 +02001678/**
Marek Vasut52e8f212015-07-19 07:27:06 +02001679 * sdr_find_phase_delay() - Find DQS enable phase or delay
1680 * @working: If 1, look for working phase/delay, if 0, look for non-working
1681 * @delay: If 1, look for delay, if 0, look for phase
1682 * @grp: Read/Write group
1683 * @work: Working window position
1684 * @work_inc: Working window increment
1685 * @pd: DQS Phase/Delay Iterator
1686 *
1687 * Find working or non-working DQS enable phase setting.
1688 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001689static int sdr_find_phase_delay(struct socfpga_sdrseq *seq, int working,
1690 int delay, const u32 grp, u32 *work,
1691 const u32 work_inc, u32 *pd)
Marek Vasut52e8f212015-07-19 07:27:06 +02001692{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001693 const u32 max = delay ? seq->iocfg->dqs_en_delay_max :
1694 seq->iocfg->dqs_en_phase_max;
Marek Vasut96df6032015-07-19 07:35:36 +02001695 u32 ret;
Marek Vasut52e8f212015-07-19 07:27:06 +02001696
1697 for (; *pd <= max; (*pd)++) {
1698 if (delay)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001699 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *pd);
Marek Vasut52e8f212015-07-19 07:27:06 +02001700 else
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001701 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *pd);
Marek Vasut52e8f212015-07-19 07:27:06 +02001702
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001703 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1704 PASS_ONE_BIT, 0);
Marek Vasut52e8f212015-07-19 07:27:06 +02001705 if (!working)
1706 ret = !ret;
1707
1708 if (ret)
1709 return 0;
1710
1711 if (work)
1712 *work += work_inc;
1713 }
1714
1715 return -EINVAL;
1716}

1717/**
Marek Vasut192d6f92015-07-19 05:26:49 +02001718 * sdr_find_phase() - Find DQS enable phase
1719 * @working: If 1, look for working phase, if 0, look for non-working phase
1720 * @grp: Read/Write group
Marek Vasut192d6f92015-07-19 05:26:49 +02001721 * @work: Working window position
1722 * @i: Iterator
1723 * @p: DQS Phase Iterator
Marek Vasut192d6f92015-07-19 05:26:49 +02001724 *
1725 * Find working or non-working DQS enable phase setting.
1726 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001727static int sdr_find_phase(struct socfpga_sdrseq *seq, int working,
1728 const u32 grp, u32 *work, u32 *i, u32 *p)
Marek Vasut192d6f92015-07-19 05:26:49 +02001729{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001730 const u32 end = seq->misccfg->read_valid_fifo_size + (working ? 0 : 1);
Marek Vasut52e8f212015-07-19 07:27:06 +02001731 int ret;
Marek Vasut192d6f92015-07-19 05:26:49 +02001732
1733 for (; *i < end; (*i)++) {
1734 if (working)
1735 *p = 0;
1736
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001737 ret = sdr_find_phase_delay(seq, working, 0, grp, work,
1738 seq->iocfg->delay_per_opa_tap, p);
Marek Vasut52e8f212015-07-19 07:27:06 +02001739 if (!ret)
1740 return 0;
Marek Vasut192d6f92015-07-19 05:26:49 +02001741
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001742 if (*p > seq->iocfg->dqs_en_phase_max) {
Marek Vasut192d6f92015-07-19 05:26:49 +02001743 /* Fiddle with FIFO. */
Marek Vasut8c887b62015-07-19 06:37:51 +02001744 rw_mgr_incr_vfifo(grp);
Marek Vasut192d6f92015-07-19 05:26:49 +02001745 if (!working)
1746 *p = 0;
1747 }
1748 }
1749
1750 return -EINVAL;
1751}
1752
Marek Vasut4c5e5842015-07-19 06:04:00 +02001753/**
1754 * sdr_working_phase() - Find working DQS enable phase
1755 * @grp: Read/Write group
1756 * @work_bgn: Working window start position
Marek Vasut4c5e5842015-07-19 06:04:00 +02001757 * @d: dtaps output value
1758 * @p: DQS Phase Iterator
1759 * @i: Iterator
1760 *
1761 * Find working DQS enable phase setting.
1762 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001763static int sdr_working_phase(struct socfpga_sdrseq *seq, const u32 grp,
1764 u32 *work_bgn, u32 *d, u32 *p, u32 *i)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001765{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001766 const u32 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
1767 seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasut192d6f92015-07-19 05:26:49 +02001768 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001769
Marek Vasut192d6f92015-07-19 05:26:49 +02001770 *work_bgn = 0;
1771
1772 for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
1773 *i = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001774 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *d);
1775 ret = sdr_find_phase(seq, 1, grp, work_bgn, i, p);
Marek Vasut192d6f92015-07-19 05:26:49 +02001776 if (!ret)
1777 return 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001778 *work_bgn += seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001779 }
1780
Marek Vasut38ed6922015-07-19 05:01:12 +02001781 /* Cannot find working solution */
Marek Vasutea9aa242016-04-04 21:21:05 +02001782 debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
Marek Vasut192d6f92015-07-19 05:26:49 +02001783 __func__, __LINE__);
1784 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001785}
1786
Marek Vasut4c5e5842015-07-19 06:04:00 +02001787/**
1788 * sdr_backup_phase() - Find DQS enable backup phase
1789 * @grp: Read/Write group
1790 * @work_bgn: Working window start position
Marek Vasut4c5e5842015-07-19 06:04:00 +02001791 * @p: DQS Phase Iterator
1792 *
1793 * Find DQS enable backup phase setting.
1794 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001795static void sdr_backup_phase(struct socfpga_sdrseq *seq, const u32 grp,
1796 u32 *work_bgn, u32 *p)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001797{
Marek Vasut96df6032015-07-19 07:35:36 +02001798 u32 tmp_delay, d;
Marek Vasut4c5e5842015-07-19 06:04:00 +02001799 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001800
1801 /* Special case code for backing up a phase */
1802 if (*p == 0) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001803 *p = seq->iocfg->dqs_en_phase_max;
1804 rw_mgr_decr_vfifo(seq, grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001805 } else {
1806 (*p)--;
1807 }
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001808 tmp_delay = *work_bgn - seq->iocfg->delay_per_opa_tap;
1809 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001810
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001811 for (d = 0; d <= seq->iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
Marek Vasut139823e2015-08-02 19:47:01 +02001812 d++) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001813 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001814
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001815 ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
1816 PASS_ONE_BIT, 0);
Marek Vasut4c5e5842015-07-19 06:04:00 +02001817 if (ret) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001818 *work_bgn = tmp_delay;
1819 break;
1820 }
Marek Vasut49891df62015-07-19 05:48:30 +02001821
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001822 tmp_delay += seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001823 }
1824
Marek Vasut4c5e5842015-07-19 06:04:00 +02001825 /* Restore VFIFO to the state it had before we decremented it (if needed). */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001826 (*p)++;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001827 if (*p > seq->iocfg->dqs_en_phase_max) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05001828 *p = 0;
Marek Vasut8c887b62015-07-19 06:37:51 +02001829 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001830 }
1831
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001832 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001833}
1834
Marek Vasut4c5e5842015-07-19 06:04:00 +02001835/**
1836 * sdr_nonworking_phase() - Find non-working DQS enable phase
1837 * @grp: Read/Write group
1838 * @work_end: Working window end position
Marek Vasut4c5e5842015-07-19 06:04:00 +02001839 * @p: DQS Phase Iterator
1840 * @i: Iterator
1841 *
1842 * Find non-working DQS enable phase setting.
1843 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001844static int sdr_nonworking_phase(struct socfpga_sdrseq *seq,
1845 const u32 grp, u32 *work_end, u32 *p, u32 *i)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001846{
Marek Vasut192d6f92015-07-19 05:26:49 +02001847 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001848
1849 (*p)++;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001850 *work_end += seq->iocfg->delay_per_opa_tap;
1851 if (*p > seq->iocfg->dqs_en_phase_max) {
Marek Vasut192d6f92015-07-19 05:26:49 +02001852 /* Fiddle with FIFO. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001853 *p = 0;
Marek Vasut8c887b62015-07-19 06:37:51 +02001854 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001855 }
1856
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001857 ret = sdr_find_phase(seq, 0, grp, work_end, i, p);
Marek Vasut192d6f92015-07-19 05:26:49 +02001858 if (ret) {
1859 /* Cannot see edge of failing read. */
Marek Vasutea9aa242016-04-04 21:21:05 +02001860 debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n",
Marek Vasut192d6f92015-07-19 05:26:49 +02001861 __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001862 }
1863
Marek Vasut192d6f92015-07-19 05:26:49 +02001864 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001865}
1866
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001867/**
1868 * sdr_find_window_center() - Find center of the working DQS window.
1869 * @grp: Read/Write group
1870 * @work_bgn: First working settings
1871 * @work_end: Last working settings
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001872 *
1873 * Find center of the working DQS enable window.
1874 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001875static int sdr_find_window_center(struct socfpga_sdrseq *seq,
1876 const u32 grp, const u32 work_bgn,
Marek Vasut8c887b62015-07-19 06:37:51 +02001877 const u32 work_end)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001878{
Marek Vasut96df6032015-07-19 07:35:36 +02001879 u32 work_mid;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001880 int tmp_delay = 0;
Marek Vasut28fd2422015-07-19 02:56:59 +02001881 int i, p, d;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001882
Marek Vasut28fd2422015-07-19 02:56:59 +02001883 work_mid = (work_bgn + work_end) / 2;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001884
Marek Vasutea9aa242016-04-04 21:21:05 +02001885 debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n",
Marek Vasut28fd2422015-07-19 02:56:59 +02001886 work_bgn, work_end, work_mid);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001887 /* Get the middle delay to be less than a VFIFO delay */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001888 tmp_delay = (seq->iocfg->dqs_en_phase_max + 1)
1889 * seq->iocfg->delay_per_opa_tap;
Marek Vasut28fd2422015-07-19 02:56:59 +02001890
Marek Vasutea9aa242016-04-04 21:21:05 +02001891 debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay);
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001892 work_mid %= tmp_delay;
Marek Vasutea9aa242016-04-04 21:21:05 +02001893 debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001894
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001895 tmp_delay = rounddown(work_mid, seq->iocfg->delay_per_opa_tap);
1896 if (tmp_delay > seq->iocfg->dqs_en_phase_max
1897 * seq->iocfg->delay_per_opa_tap) {
1898 tmp_delay = seq->iocfg->dqs_en_phase_max
1899 * seq->iocfg->delay_per_opa_tap;
1900 }
1901 p = tmp_delay / seq->iocfg->delay_per_opa_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001902
Marek Vasutea9aa242016-04-04 21:21:05 +02001903 debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001904
Marek Vasut139823e2015-08-02 19:47:01 +02001905 d = DIV_ROUND_UP(work_mid - tmp_delay,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001906 seq->iocfg->delay_per_dqs_en_dchain_tap);
1907 if (d > seq->iocfg->dqs_en_delay_max)
1908 d = seq->iocfg->dqs_en_delay_max;
1909 tmp_delay += d * seq->iocfg->delay_per_dqs_en_dchain_tap;
Marek Vasutcbb0b7e2015-07-19 04:04:33 +02001910
Marek Vasutea9aa242016-04-04 21:21:05 +02001911 debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
Marek Vasut28fd2422015-07-19 02:56:59 +02001912
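	/*
	 * Worked example with hypothetical parameters: if
	 * delay_per_opa_tap = 1000 ps, delay_per_dqs_en_dchain_tap = 100 ps,
	 * dqs_en_phase_max = 7 and work_mid = 3450 ps, the modulo above
	 * keeps work_mid below one VFIFO cycle (8000 ps); then p = 3
	 * (3000 ps) and d = DIV_ROUND_UP(450, 100) = 5, for 3500 ps total,
	 * the first setting at or above the window middle.
	 */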
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001913 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
1914 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001915
1916 /*
1917 * Push the VFIFO until we can successfully calibrate. We can do this
1918 * because the largest possible margin is 1 VFIFO cycle.
1919 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001920 for (i = 0; i < seq->misccfg->read_valid_fifo_size; i++) {
Marek Vasutea9aa242016-04-04 21:21:05 +02001921 debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n");
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001922 if (rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
Dinh Nguyen3da42852015-06-02 22:52:49 -05001923 PASS_ONE_BIT,
Marek Vasut96df6032015-07-19 07:35:36 +02001924 0)) {
Marek Vasutea9aa242016-04-04 21:21:05 +02001925 debug_cond(DLEVEL >= 2,
Marek Vasut8c887b62015-07-19 06:37:51 +02001926 "%s:%d center: found: ptap=%u dtap=%u\n",
1927 __func__, __LINE__, p, d);
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001928 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001929 }
1930
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001931 /* Fiddle with FIFO. */
Marek Vasut8c887b62015-07-19 06:37:51 +02001932 rw_mgr_incr_vfifo(grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001933 }
1934
Marek Vasutea9aa242016-04-04 21:21:05 +02001935 debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n",
Marek Vasut0a13a0f2015-07-19 04:14:32 +02001936 __func__, __LINE__);
1937 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001938}
1939
Marek Vasut33756892015-07-20 09:11:09 +02001940/**
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001941 * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to
1942 * use
Marek Vasut33756892015-07-20 09:11:09 +02001943 * @grp: Read/Write Group
1944 *
1945 * Find a good DQS enable to use.
1946 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001947static int
1948rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(struct socfpga_sdrseq *seq,
1949 const u32 grp)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001950{
Marek Vasut57355402015-07-20 09:20:20 +02001951 u32 d, p, i;
1952 u32 dtaps_per_ptap;
1953 u32 work_bgn, work_end;
Marek Vasut35e47b72015-08-10 23:01:43 +02001954 u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
Marek Vasut57355402015-07-20 09:20:20 +02001955 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001956
1957 debug("%s:%d %u\n", __func__, __LINE__, grp);
1958
1959 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1960
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001961 scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
1962 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001963
Marek Vasut2f3589c2015-07-19 02:42:21 +02001964 /* Step 0: Determine number of delay taps for each phase tap. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001965 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
1966 seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001967
Marek Vasut2f3589c2015-07-19 02:42:21 +02001968 /* Step 1: First push vfifo until we get a failing read. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001969 find_vfifo_failing_read(seq, grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001970
Marek Vasut2f3589c2015-07-19 02:42:21 +02001971 /* Step 2: Find first working phase, increment in ptaps. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05001972 work_bgn = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001973 ret = sdr_working_phase(seq, grp, &work_bgn, &d, &p, &i);
Marek Vasut914546e2015-07-20 09:20:42 +02001974 if (ret)
1975 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001976
1977 work_end = work_bgn;
1978
1979 /*
Marek Vasut2f3589c2015-07-19 02:42:21 +02001980 * If d is 0 then the working window covers a phase tap and we can
1981 * follow the old procedure. Otherwise, we've found the beginning
Dinh Nguyen3da42852015-06-02 22:52:49 -05001982 * and we need to increment the dtaps until we find the end.
1983 */
1984 if (d == 0) {
Marek Vasut2f3589c2015-07-19 02:42:21 +02001985 /*
1986 * Step 3a: If we have room, back off by one and
1987 * increment in dtaps.
1988 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001989 sdr_backup_phase(seq, grp, &work_bgn, &p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001990
Marek Vasut2f3589c2015-07-19 02:42:21 +02001991 /*
1992 * Step 4a: go forward from working phase to non working
1993 * phase, increment in ptaps.
1994 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02001995 ret = sdr_nonworking_phase(seq, grp, &work_end, &p, &i);
Marek Vasut914546e2015-07-20 09:20:42 +02001996 if (ret)
1997 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001998
Marek Vasut2f3589c2015-07-19 02:42:21 +02001999 /* Step 5a: Back off one from last, increment in dtaps. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002000
2001 /* Special case code for backing up a phase */
2002 if (p == 0) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002003 p = seq->iocfg->dqs_en_phase_max;
2004 rw_mgr_decr_vfifo(seq, grp);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002005 } else {
2006 p = p - 1;
2007 }
2008
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002009 work_end -= seq->iocfg->delay_per_opa_tap;
2010 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002011
Dinh Nguyen3da42852015-06-02 22:52:49 -05002012 d = 0;
2013
Marek Vasutea9aa242016-04-04 21:21:05 +02002014 debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002015 __func__, __LINE__, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002016 }
2017
Marek Vasut2f3589c2015-07-19 02:42:21 +02002018 /* The dtap increment to find the failing edge is done here. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002019 sdr_find_phase_delay(seq, 0, 1, grp, &work_end,
2020 seq->iocfg->delay_per_dqs_en_dchain_tap, &d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002021
2022 /* Go back to working dtap */
2023 if (d != 0)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002024 work_end -= seq->iocfg->delay_per_dqs_en_dchain_tap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002025
Marek Vasutea9aa242016-04-04 21:21:05 +02002026 debug_cond(DLEVEL >= 2,
Marek Vasut2f3589c2015-07-19 02:42:21 +02002027 "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
2028 __func__, __LINE__, p, d - 1, work_end);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002029
2030 if (work_end < work_bgn) {
2031 /* nil range */
Marek Vasutea9aa242016-04-04 21:21:05 +02002032 debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002033 __func__, __LINE__);
Marek Vasut914546e2015-07-20 09:20:42 +02002034 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002035 }
2036
Marek Vasutea9aa242016-04-04 21:21:05 +02002037 debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05002038 __func__, __LINE__, work_bgn, work_end);
2039
Dinh Nguyen3da42852015-06-02 22:52:49 -05002040 /*
Marek Vasut2f3589c2015-07-19 02:42:21 +02002041 * We need to calculate the number of dtaps that equal a ptap.
2042 * To do that we'll back up a ptap and re-find the edge of the
2043 * window using dtaps
Dinh Nguyen3da42852015-06-02 22:52:49 -05002044 */
Marek Vasutea9aa242016-04-04 21:21:05 +02002045 debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002046 __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002047
2048 /* Special case code for backing up a phase */
2049 if (p == 0) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002050 p = seq->iocfg->dqs_en_phase_max;
2051 rw_mgr_decr_vfifo(seq, grp);
Marek Vasutea9aa242016-04-04 21:21:05 +02002052 debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002053 __func__, __LINE__, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002054 } else {
2055 p = p - 1;
Marek Vasutea9aa242016-04-04 21:21:05 +02002056 debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002057 __func__, __LINE__, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002058 }
2059
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002060 scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002061
2062 /*
2063 * Increase dtap until we first see a passing read (in case the
Marek Vasut2f3589c2015-07-19 02:42:21 +02002064 * window is smaller than a ptap), and then a failing read to
2065 * mark the edge of the window again.
Dinh Nguyen3da42852015-06-02 22:52:49 -05002066 */
2067
Marek Vasut2f3589c2015-07-19 02:42:21 +02002068 /* Find a passing read. */
Marek Vasutea9aa242016-04-04 21:21:05 +02002069 debug_cond(DLEVEL >= 2, "%s:%d find passing read\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05002070 __func__, __LINE__);
Marek Vasut52e8f212015-07-19 07:27:06 +02002071
Dinh Nguyen3da42852015-06-02 22:52:49 -05002072 initial_failing_dtap = d;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002073
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002074 found_passing_read = !sdr_find_phase_delay(seq, 1, 1, grp, NULL, 0, &d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002075 if (found_passing_read) {
Marek Vasut2f3589c2015-07-19 02:42:21 +02002076 /* Find a failing read. */
Marek Vasutea9aa242016-04-04 21:21:05 +02002077 debug_cond(DLEVEL >= 2, "%s:%d find failing read\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002078 __func__, __LINE__);
Marek Vasut52e8f212015-07-19 07:27:06 +02002079 d++;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002080 found_failing_read = !sdr_find_phase_delay(seq, 0, 1, grp, NULL,
2081 0, &d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002082 } else {
Marek Vasutea9aa242016-04-04 21:21:05 +02002083 debug_cond(DLEVEL >= 1,
Marek Vasut2f3589c2015-07-19 02:42:21 +02002084 "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
2085 __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002086 }
2087
2088 /*
2089 * The dynamically calculated dtaps_per_ptap is only valid if we
2090 * found both a passing and a failing read. If we didn't, it means d hit
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002091 * the max (seq->iocfg->dqs_en_delay_max) and dtaps_per_ptap retains
Dinh Nguyen3da42852015-06-02 22:52:49 -05002092 * its statically calculated value.
2093 */
2094 if (found_passing_read && found_failing_read)
2095 dtaps_per_ptap = d - initial_failing_dtap;
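	/*
	 * Illustrative example: if the edge search first failed at dtap 2
	 * (initial_failing_dtap) and, after backing up one ptap, the
	 * failing edge was found again at dtap 14, then one ptap spans
	 * 14 - 2 = 12 dtaps.
	 */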
2096
Marek Vasut1273dd92015-07-12 21:05:08 +02002097 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
Marek Vasutea9aa242016-04-04 21:21:05 +02002098 debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u\n",
Marek Vasut2f3589c2015-07-19 02:42:21 +02002099 __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002100
Marek Vasut2f3589c2015-07-19 02:42:21 +02002101 /* Step 6: Find the centre of the window. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002102 ret = sdr_find_window_center(seq, grp, work_bgn, work_end);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002103
Marek Vasut914546e2015-07-20 09:20:42 +02002104 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002105}
2106
Marek Vasutc4907892015-07-13 02:11:02 +02002107/**
Marek Vasut901dc362015-07-13 02:48:34 +02002108 * search_stop_check() - Check if the detected edge is valid
2109 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2110 * @d: DQS delay
2111 * @rank_bgn: Rank number
2112 * @write_group: Write Group
2113 * @read_group: Read Group
2114 * @bit_chk: Resulting bit mask after the test
2115 * @sticky_bit_chk: Resulting sticky bit mask after the test
2116 * @use_read_test: Perform read test
2117 *
2118 * Test if the found edge is valid.
2119 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002120static u32 search_stop_check(struct socfpga_sdrseq *seq, const int write,
2121 const int d, const int rank_bgn,
Marek Vasut901dc362015-07-13 02:48:34 +02002122 const u32 write_group, const u32 read_group,
2123 u32 *bit_chk, u32 *sticky_bit_chk,
2124 const u32 use_read_test)
2125{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002126 const u32 ratio = seq->rwcfg->mem_if_read_dqs_width /
2127 seq->rwcfg->mem_if_write_dqs_width;
2128 const u32 correct_mask = write ? seq->param.write_correct_mask :
2129 seq->param.read_correct_mask;
2130 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2131 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut901dc362015-07-13 02:48:34 +02002132 u32 ret;
2133 /*
2134 * Stop searching when the read test doesn't pass AND when
2135 * we've seen a passing read on every bit.
2136 */
2137 if (write) { /* WRITE-ONLY */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002138 ret = !rw_mgr_mem_calibrate_write_test(seq, rank_bgn,
2139 write_group, 0,
2140 PASS_ONE_BIT, bit_chk,
2141 0);
Marek Vasut901dc362015-07-13 02:48:34 +02002142 } else if (use_read_test) { /* READ-ONLY */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002143 ret = !rw_mgr_mem_calibrate_read_test(seq, rank_bgn, read_group,
Marek Vasut901dc362015-07-13 02:48:34 +02002144 NUM_READ_PB_TESTS,
2145 PASS_ONE_BIT, bit_chk,
2146 0, 0);
2147 } else { /* READ-ONLY */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002148 rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, 0,
Marek Vasut901dc362015-07-13 02:48:34 +02002149 PASS_ONE_BIT, bit_chk, 0);
2150 *bit_chk = *bit_chk >> (per_dqs *
2151 (read_group - (write_group * ratio)));
2152 ret = (*bit_chk == 0);
2153 }
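	/*
	 * Illustrative note on the write-test fallback above (hypothetical
	 * widths): with a read:write DQS ratio of 2 and 8 DQ per read DQS,
	 * read group 3 under write group 1 shifts the result right by
	 * 8 * (3 - 2) = 8 bits, so bits [15:8] of the write-group mask
	 * become this read group's per-bit result.
	 */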
2154 *sticky_bit_chk = *sticky_bit_chk | *bit_chk;
2155 ret = ret && (*sticky_bit_chk == correct_mask);
Marek Vasutea9aa242016-04-04 21:21:05 +02002156 debug_cond(DLEVEL >= 2,
Marek Vasut901dc362015-07-13 02:48:34 +02002157 "%s:%d center(left): dtap=%u => %u == %u && %u\n",
2158 __func__, __LINE__, d,
2159 *sticky_bit_chk, correct_mask, ret);
2160 return ret;
2161}
2162
2163/**
Marek Vasut71120772015-07-13 02:38:15 +02002164 * search_left_edge() - Find left edge of DQ/DQS working phase
2165 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2166 * @rank_bgn: Rank number
2167 * @write_group: Write Group
2168 * @read_group: Read Group
2169 * @test_bgn: Rank number to begin the test
Marek Vasut71120772015-07-13 02:38:15 +02002170 * @sticky_bit_chk: Resulting sticky bit mask after the test
2171 * @left_edge: Left edge of the DQ/DQS phase
2172 * @right_edge: Right edge of the DQ/DQS phase
2173 * @use_read_test: Perform read test
2174 *
2175 * Find left edge of DQ/DQS working phase.
2176 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002177static void search_left_edge(struct socfpga_sdrseq *seq, const int write,
2178 const int rank_bgn, const u32 write_group,
2179 const u32 read_group, const u32 test_bgn,
2180 u32 *sticky_bit_chk, int *left_edge,
2181 int *right_edge, const u32 use_read_test)
Marek Vasut71120772015-07-13 02:38:15 +02002182{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002183 const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2184 seq->iocfg->io_in_delay_max;
2185 const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
2186 seq->iocfg->dqs_in_delay_max;
2187 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2188 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut0c4be192015-07-18 20:34:00 +02002189 u32 stop, bit_chk;
Marek Vasut71120772015-07-13 02:38:15 +02002190 int i, d;
2191
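	/*
	 * Encoding note: while d sweeps upward below, a bit that fails
	 * before it has ever passed records right_edge[i] = -(d + 1); the
	 * negative value flags a right edge seen before the still-unknown
	 * left edge. E.g. a failure at d = 3 with no prior pass is stored
	 * as -4.
	 */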
2192 for (d = 0; d <= dqs_max; d++) {
2193 if (write)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002194 scc_mgr_apply_group_dq_out1_delay(seq, d);
Marek Vasut71120772015-07-13 02:38:15 +02002195 else
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002196 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, d);
Marek Vasut71120772015-07-13 02:38:15 +02002197
2198 writel(0, &sdr_scc_mgr->update);
2199
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002200 stop = search_stop_check(seq, write, d, rank_bgn, write_group,
Marek Vasut0c4be192015-07-18 20:34:00 +02002201 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut901dc362015-07-13 02:48:34 +02002202 use_read_test);
Marek Vasut71120772015-07-13 02:38:15 +02002203 if (stop == 1)
2204 break;
2205
2206 /* stop != 1 */
2207 for (i = 0; i < per_dqs; i++) {
Marek Vasut0c4be192015-07-18 20:34:00 +02002208 if (bit_chk & 1) {
Marek Vasut71120772015-07-13 02:38:15 +02002209 /*
2210 * Remember a passing test as
2211 * the left_edge.
2212 */
2213 left_edge[i] = d;
2214 } else {
2215 /*
2216 * If a left edge has not been seen
2217 * yet, then a future passing test
2218 * will mark this edge as the right
2219 * edge.
2220 */
2221 if (left_edge[i] == delay_max + 1)
2222 right_edge[i] = -(d + 1);
2223 }
Marek Vasut0c4be192015-07-18 20:34:00 +02002224 bit_chk >>= 1;
Marek Vasut71120772015-07-13 02:38:15 +02002225 }
2226 }
2227
2228 /* Reset DQ delay chains to 0 */
2229 if (write)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002230 scc_mgr_apply_group_dq_out1_delay(seq, 0);
Marek Vasut71120772015-07-13 02:38:15 +02002231 else
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002232 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
Marek Vasut71120772015-07-13 02:38:15 +02002233
2234 *sticky_bit_chk = 0;
2235 for (i = per_dqs - 1; i >= 0; i--) {
Marek Vasutea9aa242016-04-04 21:21:05 +02002236 debug_cond(DLEVEL >= 2,
Marek Vasut71120772015-07-13 02:38:15 +02002237 "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
2238 __func__, __LINE__, i, left_edge[i],
2239 i, right_edge[i]);
2240
2241 /*
2242 * Check for cases where we haven't found the left edge,
2243	 * which makes our assignment of the right edge invalid.
2244 * Reset it to the illegal value.
2245 */
2246 if ((left_edge[i] == delay_max + 1) &&
2247 (right_edge[i] != delay_max + 1)) {
2248 right_edge[i] = delay_max + 1;
Marek Vasutea9aa242016-04-04 21:21:05 +02002249 debug_cond(DLEVEL >= 2,
Marek Vasut71120772015-07-13 02:38:15 +02002250 "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
2251 __func__, __LINE__, i, right_edge[i]);
2252 }
2253
2254 /*
2255 * Reset sticky bit
2256 * READ: except for bits where we have seen both
2257 * the left and right edge.
2258 * WRITE: except for bits where we have seen the
2259 * left edge.
2260 */
2261 *sticky_bit_chk <<= 1;
2262 if (write) {
2263 if (left_edge[i] != delay_max + 1)
2264 *sticky_bit_chk |= 1;
2265 } else {
2266 if ((left_edge[i] != delay_max + 1) &&
2267 (right_edge[i] != delay_max + 1))
2268 *sticky_bit_chk |= 1;
2269 }
2270 }
Marek Vasut71120772015-07-13 02:38:15 +02002271}
2272
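/*
 * Worked example of the edge encoding above, with hypothetical numbers
 * (delay_max = 31, so 32 marks "no edge found"): a bit that fails at
 * d = 0..2 and passes from d = 3 on ends the sweep with right_edge = -3
 * (a failing region seen before the first pass) and left_edge equal to
 * the last passing DQ delay tap. Sketch only, not compiled in.
 */
#if 0
static void left_edge_model(void)
{
	const int delay_max = 31;
	int left_edge = delay_max + 1;
	int right_edge = delay_max + 1;
	int d, pass;

	for (d = 0; d <= delay_max; d++) {
		pass = (d >= 3);	/* hypothetical pass/fail response */
		if (pass)
			left_edge = d;	/* remember last passing tap */
		else if (left_edge == delay_max + 1)
			right_edge = -(d + 1);
	}
	/* Here: left_edge == 31, right_edge == -3. */
}
#endif
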
2273/**
Marek Vasutc4907892015-07-13 02:11:02 +02002274 * search_right_edge() - Find right edge of DQ/DQS working phase
2275 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2276 * @rank_bgn: Rank number
2277 * @write_group: Write Group
2278 * @read_group: Read Group
2279 * @start_dqs: DQS start phase
2280 * @start_dqs_en: DQS enable start phase
Marek Vasutc4907892015-07-13 02:11:02 +02002281 * @sticky_bit_chk: Resulting sticky bit mask after the test
2282 * @left_edge: Left edge of the DQ/DQS phase
2283 * @right_edge: Right edge of the DQ/DQS phase
2284 * @use_read_test: Perform read test
2285 *
2286 * Find right edge of DQ/DQS working phase.
2287 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002288static int search_right_edge(struct socfpga_sdrseq *seq, const int write,
2289 const int rank_bgn, const u32 write_group,
2290 const u32 read_group, const int start_dqs,
2291 const int start_dqs_en, u32 *sticky_bit_chk,
2292 int *left_edge, int *right_edge,
2293 const u32 use_read_test)
Marek Vasutc4907892015-07-13 02:11:02 +02002294{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002295 const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2296 seq->iocfg->io_in_delay_max;
2297 const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
2298 seq->iocfg->dqs_in_delay_max;
2299 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2300 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut0c4be192015-07-18 20:34:00 +02002301 u32 stop, bit_chk;
Marek Vasutc4907892015-07-13 02:11:02 +02002302 int i, d;
2303
2304 for (d = 0; d <= dqs_max - start_dqs; d++) {
2305 if (write) { /* WRITE-ONLY */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002306 scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
2307 write_group,
Marek Vasutc4907892015-07-13 02:11:02 +02002308 d + start_dqs);
2309 } else { /* READ-ONLY */
2310 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002311 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasut5ded7322015-08-02 19:42:26 +02002312 u32 delay = d + start_dqs_en;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002313 if (delay > seq->iocfg->dqs_en_delay_max)
2314 delay = seq->iocfg->dqs_en_delay_max;
Marek Vasutc4907892015-07-13 02:11:02 +02002315 scc_mgr_set_dqs_en_delay(read_group, delay);
2316 }
2317 scc_mgr_load_dqs(read_group);
2318 }
2319
2320 writel(0, &sdr_scc_mgr->update);
2321
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002322 stop = search_stop_check(seq, write, d, rank_bgn, write_group,
Marek Vasut0c4be192015-07-18 20:34:00 +02002323 read_group, &bit_chk, sticky_bit_chk,
Marek Vasut901dc362015-07-13 02:48:34 +02002324 use_read_test);
Marek Vasutc4907892015-07-13 02:11:02 +02002325 if (stop == 1) {
2326 if (write && (d == 0)) { /* WRITE-ONLY */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002327 for (i = 0;
2328 i < seq->rwcfg->mem_dq_per_write_dqs;
Marek Vasut139823e2015-08-02 19:47:01 +02002329 i++) {
Marek Vasutc4907892015-07-13 02:11:02 +02002330 /*
2331 * d = 0 failed, but it passed when
2332 * testing the left edge, so it must be
2333 * marginal, set it to -1
2334 */
2335 if (right_edge[i] == delay_max + 1 &&
2336 left_edge[i] != delay_max + 1)
2337 right_edge[i] = -1;
2338 }
2339 }
2340 break;
2341 }
2342
2343 /* stop != 1 */
2344 for (i = 0; i < per_dqs; i++) {
Marek Vasut0c4be192015-07-18 20:34:00 +02002345 if (bit_chk & 1) {
Marek Vasutc4907892015-07-13 02:11:02 +02002346 /*
2347 * Remember a passing test as
2348 * the right_edge.
2349 */
2350 right_edge[i] = d;
2351 } else {
2352 if (d != 0) {
2353 /*
2354 * If a right edge has not
2355 * been seen yet, then a future
2356 * passing test will mark this
2357 * edge as the left edge.
2358 */
2359 if (right_edge[i] == delay_max + 1)
2360 left_edge[i] = -(d + 1);
2361 } else {
2362 /*
2363 * d = 0 failed, but it passed
2364 * when testing the left edge,
2365 * so it must be marginal, set
2366 * it to -1
2367 */
2368 if (right_edge[i] == delay_max + 1 &&
2369 left_edge[i] != delay_max + 1)
2370 right_edge[i] = -1;
2371 /*
2372 * If a right edge has not been
2373 * seen yet, then a future
2374 * passing test will mark this
2375 * edge as the left edge.
2376 */
2377 else if (right_edge[i] == delay_max + 1)
2378 left_edge[i] = -(d + 1);
2379 }
2380 }
2381
Marek Vasutea9aa242016-04-04 21:21:05 +02002382 debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ",
Marek Vasutc4907892015-07-13 02:11:02 +02002383 __func__, __LINE__, d);
Marek Vasutea9aa242016-04-04 21:21:05 +02002384 debug_cond(DLEVEL >= 2,
Marek Vasutc4907892015-07-13 02:11:02 +02002385 "bit_chk_test=%i left_edge[%u]: %d ",
Marek Vasut0c4be192015-07-18 20:34:00 +02002386 bit_chk & 1, i, left_edge[i]);
Marek Vasutea9aa242016-04-04 21:21:05 +02002387 debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i,
Marek Vasutc4907892015-07-13 02:11:02 +02002388 right_edge[i]);
Marek Vasut0c4be192015-07-18 20:34:00 +02002389 bit_chk >>= 1;
Marek Vasutc4907892015-07-13 02:11:02 +02002390 }
2391 }
2392
2393 /* Check that all bits have a window */
2394 for (i = 0; i < per_dqs; i++) {
Marek Vasutea9aa242016-04-04 21:21:05 +02002395 debug_cond(DLEVEL >= 2,
Marek Vasutc4907892015-07-13 02:11:02 +02002396 "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
2397 __func__, __LINE__, i, left_edge[i],
2398 i, right_edge[i]);
2399 if ((left_edge[i] == dqs_max + 1) ||
2400 (right_edge[i] == dqs_max + 1))
2401 return i + 1; /* FIXME: If we fail, retval > 0 */
2402 }
2403
2404 return 0;
2405}
2406
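/*
 * The d = 0 special case above deserves a worked example: if the first
 * DQS step fails but the bit passed during the left-edge sweep, the bit
 * is marginal and its right edge is pinned to -1. The helper below is a
 * simplified single-bit model (delay_max = 31 assumed), not built into
 * the sequencer.
 */
#if 0
static void right_edge_mark(int d, int pass, int *left_edge, int *right_edge)
{
	const int illegal = 31 + 1;	/* delay_max + 1 */

	if (pass) {
		*right_edge = d;	/* remember last passing DQS tap */
	} else if (d == 0 && *right_edge == illegal &&
		   *left_edge != illegal) {
		*right_edge = -1;	/* passed in left sweep: marginal */
	} else if (*right_edge == illegal) {
		*left_edge = -(d + 1);	/* failing run, left edge pending */
	}
}
#endif
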
Marek Vasutafb3eb82015-07-18 19:18:06 +02002407/**
2408 * get_window_mid_index() - Find the best middle setting of DQ/DQS phase
2409 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2410 * @left_edge: Left edge of the DQ/DQS phase
2411 * @right_edge: Right edge of the DQ/DQS phase
2412 * @mid_min: Best DQ/DQS phase middle setting
2413 *
2414 * Find index and value of the middle of the DQ/DQS working phase.
2415 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002416static int get_window_mid_index(struct socfpga_sdrseq *seq,
2417 const int write, int *left_edge,
Marek Vasutafb3eb82015-07-18 19:18:06 +02002418 int *right_edge, int *mid_min)
2419{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002420 const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2421 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasutafb3eb82015-07-18 19:18:06 +02002422 int i, mid, min_index;
2423
2424 /* Find middle of window for each DQ bit */
2425 *mid_min = left_edge[0] - right_edge[0];
2426 min_index = 0;
2427 for (i = 1; i < per_dqs; i++) {
2428 mid = left_edge[i] - right_edge[i];
2429 if (mid < *mid_min) {
2430 *mid_min = mid;
2431 min_index = i;
2432 }
2433 }
2434
2435 /*
2436 * -mid_min/2 represents the amount that we need to move DQS.
2437 * If mid_min is odd and positive we'll need to add one to make
2438 * sure the rounding in further calculations is correct (always
2439 * bias to the right), so just add 1 for all positive values.
2440 */
2441 if (*mid_min > 0)
2442 (*mid_min)++;
2443 *mid_min = *mid_min / 2;
2444
Marek Vasutea9aa242016-04-04 21:21:05 +02002445 debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
Marek Vasutafb3eb82015-07-18 19:18:06 +02002446 __func__, __LINE__, *mid_min, min_index);
2447 return min_index;
2448}
2449
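/*
 * Worked example of the bias arithmetic above, with hypothetical edges:
 * left_edge = {10, 12} and right_edge = {4, 2} give per-bit mids of 6
 * and 10, so min_index = 0 and mid_min = 6; being positive, mid_min is
 * biased up to 7 and halved to 3, i.e. DQS moves by -3 taps. Sketch
 * only, not compiled in.
 */
#if 0
static int mid_min_example(void)
{
	int left_edge[2] = { 10, 12 };
	int right_edge[2] = { 4, 2 };
	int mid_min = left_edge[0] - right_edge[0];	/* 6 */
	int i;

	for (i = 1; i < 2; i++)
		if (left_edge[i] - right_edge[i] < mid_min)
			mid_min = left_edge[i] - right_edge[i];

	if (mid_min > 0)
		mid_min++;	/* bias rounding to the right */

	return mid_min / 2;	/* 3 */
}
#endif
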
Marek Vasutffb8b662015-07-18 19:46:26 +02002450/**
2451 * center_dq_windows() - Center the DQ/DQS windows
2452 * @write: Perform read (Stage 2) or write (Stage 3) calibration
2453 * @left_edge: Left edge of the DQ/DQS phase
2454 * @right_edge: Right edge of the DQ/DQS phase
2455 * @mid_min: Adjusted DQ/DQS phase middle setting
2456 * @orig_mid_min: Original DQ/DQS phase middle setting
2457 * @min_index: DQ/DQS phase middle setting index
2458 * @test_bgn: Rank number to begin the test
2459 * @dq_margin: Amount of shift for the DQ
2460 * @dqs_margin: Amount of shift for the DQS
2461 *
2462 * Align the DQ/DQS windows in each group.
2463 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002464static void center_dq_windows(struct socfpga_sdrseq *seq,
2465 const int write, int *left_edge, int *right_edge,
Marek Vasutffb8b662015-07-18 19:46:26 +02002466 const int mid_min, const int orig_mid_min,
2467 const int min_index, const int test_bgn,
2468 int *dq_margin, int *dqs_margin)
2469{
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002470 const s32 delay_max = write ? seq->iocfg->io_out1_delay_max :
2471 seq->iocfg->io_in_delay_max;
2472 const s32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs :
2473 seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasute026b982016-04-05 23:17:35 +02002474 const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
Marek Vasutffb8b662015-07-18 19:46:26 +02002475 SCC_MGR_IO_IN_DELAY_OFFSET;
Marek Vasute026b982016-04-05 23:17:35 +02002476 const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
Marek Vasutffb8b662015-07-18 19:46:26 +02002477
Marek Vasute026b982016-04-05 23:17:35 +02002478 s32 temp_dq_io_delay1;
Marek Vasutffb8b662015-07-18 19:46:26 +02002479 int shift_dq, i, p;
2480
2481 /* Initialize data for export structures */
2482 *dqs_margin = delay_max + 1;
2483 *dq_margin = delay_max + 1;
2484
2485 /* add delay to bring centre of all DQ windows to the same "level" */
2486 for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
2487 /* Use values before divide by 2 to reduce round off error */
2488 shift_dq = (left_edge[i] - right_edge[i] -
2489 (left_edge[min_index] - right_edge[min_index]))/2 +
2490 (orig_mid_min - mid_min);
2491
Marek Vasutea9aa242016-04-04 21:21:05 +02002492 debug_cond(DLEVEL >= 2,
Marek Vasutffb8b662015-07-18 19:46:26 +02002493 "vfifo_center: before: shift_dq[%u]=%d\n",
2494 i, shift_dq);
2495
Marek Vasute026b982016-04-05 23:17:35 +02002496 temp_dq_io_delay1 = readl(addr + (i << 2));
Marek Vasutffb8b662015-07-18 19:46:26 +02002497
2498 if (shift_dq + temp_dq_io_delay1 > delay_max)
Marek Vasute026b982016-04-05 23:17:35 +02002499 shift_dq = delay_max - temp_dq_io_delay1;
Marek Vasutffb8b662015-07-18 19:46:26 +02002500 else if (shift_dq + temp_dq_io_delay1 < 0)
2501 shift_dq = -temp_dq_io_delay1;
2502
Marek Vasutea9aa242016-04-04 21:21:05 +02002503 debug_cond(DLEVEL >= 2,
Marek Vasutffb8b662015-07-18 19:46:26 +02002504 "vfifo_center: after: shift_dq[%u]=%d\n",
2505 i, shift_dq);
2506
2507 if (write)
Marek Vasut139823e2015-08-02 19:47:01 +02002508 scc_mgr_set_dq_out1_delay(i,
2509 temp_dq_io_delay1 + shift_dq);
Marek Vasutffb8b662015-07-18 19:46:26 +02002510 else
Marek Vasut139823e2015-08-02 19:47:01 +02002511 scc_mgr_set_dq_in_delay(p,
2512 temp_dq_io_delay1 + shift_dq);
Marek Vasutffb8b662015-07-18 19:46:26 +02002513
2514 scc_mgr_load_dq(p);
2515
Marek Vasutea9aa242016-04-04 21:21:05 +02002516 debug_cond(DLEVEL >= 2,
Marek Vasutffb8b662015-07-18 19:46:26 +02002517 "vfifo_center: margin[%u]=[%d,%d]\n", i,
2518 left_edge[i] - shift_dq + (-mid_min),
2519 right_edge[i] + shift_dq - (-mid_min));
2520
2521 /* To determine values for export structures */
2522 if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
2523 *dq_margin = left_edge[i] - shift_dq + (-mid_min);
2524
2525 if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
2526 *dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2527 }
Marek Vasutffb8b662015-07-18 19:46:26 +02002528}
2529
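/*
 * The clamping of shift_dq above keeps the programmed delay inside the
 * delay chain. A minimal stand-alone version with hypothetical numbers:
 * cur_delay = 30, delay_max = 31 and a requested shift of +5 gets cut
 * back to +1. Sketch only, not compiled in.
 */
#if 0
static int clamp_shift_dq(int shift_dq, int cur_delay, int delay_max)
{
	if (shift_dq + cur_delay > delay_max)
		return delay_max - cur_delay;	/* clip at upper end */
	if (shift_dq + cur_delay < 0)
		return -cur_delay;		/* clip at zero */
	return shift_dq;
}
#endif
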
Marek Vasutac63b9a2015-07-21 04:27:32 +02002530/**
2531 * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
2532 * @rank_bgn: Rank number
2533 * @rw_group: Read/Write Group
2534 * @test_bgn: Rank at which the test begins
2535 * @use_read_test: Perform a read test
2536 * @update_fom: Update FOM
2537 *
2538 * Per-bit deskew DQ and centering.
2539 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002540static int rw_mgr_mem_calibrate_vfifo_center(struct socfpga_sdrseq *seq,
2541 const u32 rank_bgn,
2542 const u32 rw_group,
2543 const u32 test_bgn,
2544 const int use_read_test,
2545 const int update_fom)
Dinh Nguyen3da42852015-06-02 22:52:49 -05002546{
Marek Vasut5d6db442015-07-18 19:57:12 +02002547 const u32 addr =
2548 SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
Marek Vasut0113c3e2015-07-18 20:42:27 +02002549 (rw_group << 2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002550 /*
2551 * Store these as signed since there are comparisons with
2552 * signed numbers.
2553 */
Marek Vasut5ded7322015-08-02 19:42:26 +02002554 u32 sticky_bit_chk;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002555 s32 left_edge[seq->rwcfg->mem_dq_per_read_dqs];
2556 s32 right_edge[seq->rwcfg->mem_dq_per_read_dqs];
2557 s32 orig_mid_min, mid_min;
2558 s32 new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
2559 s32 dq_margin, dqs_margin;
Marek Vasut5d6db442015-07-18 19:57:12 +02002560 int i, min_index;
Marek Vasutc4907892015-07-13 02:11:02 +02002561 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002562
Marek Vasut0113c3e2015-07-18 20:42:27 +02002563 debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002564
Marek Vasut5d6db442015-07-18 19:57:12 +02002565 start_dqs = readl(addr);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002566 if (seq->iocfg->shift_dqs_en_when_shift_dqs)
2567 start_dqs_en = readl(addr - seq->iocfg->dqs_en_delay_offset);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002568
2569 /* set the left and right edge of each bit to an illegal value */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002570 /* use (seq->iocfg->io_in_delay_max + 1) as an illegal value */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002571 sticky_bit_chk = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002572 for (i = 0; i < seq->rwcfg->mem_dq_per_read_dqs; i++) {
2573 left_edge[i] = seq->iocfg->io_in_delay_max + 1;
2574 right_edge[i] = seq->iocfg->io_in_delay_max + 1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002575 }
2576
Dinh Nguyen3da42852015-06-02 22:52:49 -05002577 /* Search for the left edge of the window for each bit */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002578 search_left_edge(seq, 0, rank_bgn, rw_group, rw_group, test_bgn,
Marek Vasut0c4be192015-07-18 20:34:00 +02002579 &sticky_bit_chk,
Marek Vasut71120772015-07-13 02:38:15 +02002580 left_edge, right_edge, use_read_test);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002581
Dinh Nguyen3da42852015-06-02 22:52:49 -05002583 /* Search for the right edge of the window for each bit */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002584 ret = search_right_edge(seq, 0, rank_bgn, rw_group, rw_group,
Marek Vasutc4907892015-07-13 02:11:02 +02002585 start_dqs, start_dqs_en,
Marek Vasut0c4be192015-07-18 20:34:00 +02002586 &sticky_bit_chk,
Marek Vasutc4907892015-07-13 02:11:02 +02002587 left_edge, right_edge, use_read_test);
2588 if (ret) {
2589 /*
2590 * Restore delay chain settings before letting the loop
2591 * in rw_mgr_mem_calibrate_vfifo to retry different
2592 * dqs/ck relationships.
2593 */
Marek Vasut0113c3e2015-07-18 20:42:27 +02002594 scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002595 if (seq->iocfg->shift_dqs_en_when_shift_dqs)
Marek Vasut0113c3e2015-07-18 20:42:27 +02002596 scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002597
Marek Vasut0113c3e2015-07-18 20:42:27 +02002598 scc_mgr_load_dqs(rw_group);
Marek Vasut1273dd92015-07-12 21:05:08 +02002599 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002600
Marek Vasutea9aa242016-04-04 21:21:05 +02002601 debug_cond(DLEVEL >= 1,
Marek Vasutc4907892015-07-13 02:11:02 +02002602 "%s:%d vfifo_center: failed to find edge [%u]: %d %d",
2603			   __func__, __LINE__, ret - 1, left_edge[ret - 1], right_edge[ret - 1]);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002604 if (use_read_test) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002605 set_failing_group_stage(seq, rw_group *
2606						seq->rwcfg->mem_dq_per_read_dqs + ret - 1,
Marek Vasutc4907892015-07-13 02:11:02 +02002607 CAL_STAGE_VFIFO,
2608 CAL_SUBSTAGE_VFIFO_CENTER);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002609 } else {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002610 set_failing_group_stage(seq, rw_group *
2611						seq->rwcfg->mem_dq_per_read_dqs + ret - 1,
Marek Vasutc4907892015-07-13 02:11:02 +02002612 CAL_STAGE_VFIFO_AFTER_WRITES,
2613 CAL_SUBSTAGE_VFIFO_CENTER);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002614 }
Marek Vasut98668242015-07-18 20:44:28 +02002615 return -EIO;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002616 }
2617
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002618 min_index = get_window_mid_index(seq, 0, left_edge, right_edge,
2619 &mid_min);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002620
2621 /* Determine the amount we can change DQS (which is -mid_min) */
2622 orig_mid_min = mid_min;
2623 new_dqs = start_dqs - mid_min;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002624 if (new_dqs > seq->iocfg->dqs_in_delay_max)
2625 new_dqs = seq->iocfg->dqs_in_delay_max;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002626 else if (new_dqs < 0)
2627 new_dqs = 0;
2628
2629 mid_min = start_dqs - new_dqs;
Marek Vasutea9aa242016-04-04 21:21:05 +02002630 debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
Dinh Nguyen3da42852015-06-02 22:52:49 -05002631 mid_min, new_dqs);
2632
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002633 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
2634 if (start_dqs_en - mid_min > seq->iocfg->dqs_en_delay_max)
Marek Vasut139823e2015-08-02 19:47:01 +02002635 mid_min += start_dqs_en - mid_min -
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002636 seq->iocfg->dqs_en_delay_max;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002637 else if (start_dqs_en - mid_min < 0)
2638 mid_min += start_dqs_en - mid_min;
2639 }
2640 new_dqs = start_dqs - mid_min;
2641
Marek Vasutea9aa242016-04-04 21:21:05 +02002642 debug_cond(DLEVEL >= 1,
Marek Vasutf0712c32015-07-18 08:01:45 +02002643 "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
2644 start_dqs,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002645 seq->iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
Dinh Nguyen3da42852015-06-02 22:52:49 -05002646 new_dqs, mid_min);
2647
Marek Vasutffb8b662015-07-18 19:46:26 +02002648 /* Add delay to bring centre of all DQ windows to the same "level". */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002649 center_dq_windows(seq, 0, left_edge, right_edge, mid_min, orig_mid_min,
Marek Vasutffb8b662015-07-18 19:46:26 +02002650 min_index, test_bgn, &dq_margin, &dqs_margin);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002651
Dinh Nguyen3da42852015-06-02 22:52:49 -05002652 /* Move DQS-en */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002653 if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
Marek Vasut5d6db442015-07-18 19:57:12 +02002654 final_dqs_en = start_dqs_en - mid_min;
Marek Vasut0113c3e2015-07-18 20:42:27 +02002655 scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
2656 scc_mgr_load_dqs(rw_group);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002657 }
2658
2659 /* Move DQS */
Marek Vasut0113c3e2015-07-18 20:42:27 +02002660 scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
2661 scc_mgr_load_dqs(rw_group);
Marek Vasutea9aa242016-04-04 21:21:05 +02002662 debug_cond(DLEVEL >= 2,
Marek Vasutf0712c32015-07-18 08:01:45 +02002663 "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
2664 __func__, __LINE__, dq_margin, dqs_margin);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002665
2666 /*
2667 * Do not remove this line as it makes sure all of our decisions
2668 * have been applied. Apply the update bit.
2669 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002670 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002671
Marek Vasut98668242015-07-18 20:44:28 +02002672 if ((dq_margin < 0) || (dqs_margin < 0))
2673 return -EINVAL;
2674
2675 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002676}
2677
Marek Vasutbce24ef2015-07-17 03:16:45 +02002678/**
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002679 * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the
2680 * device
Marek Vasut04372fb2015-07-18 02:46:56 +02002681 * @rw_group: Read/Write Group
2682 * @phase: DQ/DQS phase
2683 *
2684 * Because initially no communication can be reliably performed with the memory
2685 * device, the sequencer uses a guaranteed write mechanism to write data into
2686 * the memory device.
2687 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002688static int rw_mgr_mem_calibrate_guaranteed_write(struct socfpga_sdrseq *seq,
2689 const u32 rw_group,
Marek Vasut04372fb2015-07-18 02:46:56 +02002690 const u32 phase)
2691{
Marek Vasut04372fb2015-07-18 02:46:56 +02002692 int ret;
2693
2694 /* Set a particular DQ/DQS phase. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002695 scc_mgr_set_dqdqs_output_phase_all_ranks(seq, rw_group, phase);
Marek Vasut04372fb2015-07-18 02:46:56 +02002696
Marek Vasutea9aa242016-04-04 21:21:05 +02002697 debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n",
Marek Vasut04372fb2015-07-18 02:46:56 +02002698 __func__, __LINE__, rw_group, phase);
2699
2700 /*
2701 * Altera EMI_RM 2015.05.04 :: Figure 1-25
2702 * Load up the patterns used by read calibration using the
2703 * current DQDQS phase.
2704 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002705 rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
Marek Vasut04372fb2015-07-18 02:46:56 +02002706
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002707 if (seq->gbl.phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
Marek Vasut04372fb2015-07-18 02:46:56 +02002708 return 0;
2709
2710 /*
2711 * Altera EMI_RM 2015.05.04 :: Figure 1-26
2712 * Back-to-Back reads of the patterns used for calibration.
2713 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002714 ret = rw_mgr_mem_calibrate_read_test_patterns(seq, 0, rw_group, 1);
Marek Vasutd844c7d2015-07-18 03:55:07 +02002715 if (ret)
Marek Vasutea9aa242016-04-04 21:21:05 +02002716 debug_cond(DLEVEL >= 1,
Marek Vasut04372fb2015-07-18 02:46:56 +02002717 "%s:%d Guaranteed read test failed: g=%u p=%u\n",
2718 __func__, __LINE__, rw_group, phase);
Marek Vasutd844c7d2015-07-18 03:55:07 +02002719 return ret;
Marek Vasut04372fb2015-07-18 02:46:56 +02002720}
2721
2722/**
Marek Vasutf09da112015-07-18 02:57:32 +02002723 * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
2724 * @rw_group: Read/Write Group
2725 * @test_bgn: Rank at which the test begins
2726 *
2727 * DQS enable calibration ensures reliable capture of the DQ signal without
2728 * glitches on the DQS line.
2729 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002730static int
2731rw_mgr_mem_calibrate_dqs_enable_calibration(struct socfpga_sdrseq *seq,
2732 const u32 rw_group,
2733 const u32 test_bgn)
Marek Vasutf09da112015-07-18 02:57:32 +02002734{
Marek Vasutf09da112015-07-18 02:57:32 +02002735 /*
2736 * Altera EMI_RM 2015.05.04 :: Figure 1-27
2737	 * DQS and DQS Enable Signal Relationships.
2738 */
Marek Vasut28ea8272015-07-18 04:28:42 +02002739
2740	/* We start at zero, so there is one less DQ to divide among. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002741 const u32 delay_step = seq->iocfg->io_in_delay_max /
2742 (seq->rwcfg->mem_dq_per_read_dqs - 1);
Marek Vasut914546e2015-07-20 09:20:42 +02002743 int ret;
Marek Vasut28ea8272015-07-18 04:28:42 +02002744 u32 i, p, d, r;
2745
2746 debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
2747
2748 /* Try different dq_in_delays since the DQ path is shorter than DQS. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002749 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut28ea8272015-07-18 04:28:42 +02002750 r += NUM_RANKS_PER_SHADOW_REG) {
2751 for (i = 0, p = test_bgn, d = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002752 i < seq->rwcfg->mem_dq_per_read_dqs;
Marek Vasut28ea8272015-07-18 04:28:42 +02002753 i++, p++, d += delay_step) {
Marek Vasutea9aa242016-04-04 21:21:05 +02002754 debug_cond(DLEVEL >= 1,
Marek Vasut28ea8272015-07-18 04:28:42 +02002755 "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
2756 __func__, __LINE__, rw_group, r, i, p, d);
2757
2758 scc_mgr_set_dq_in_delay(p, d);
2759 scc_mgr_load_dq(p);
2760 }
2761
2762 writel(0, &sdr_scc_mgr->update);
2763 }
2764
2765 /*
2766 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
2767 * dq_in_delay values
2768 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002769 ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(seq, rw_group);
Marek Vasut28ea8272015-07-18 04:28:42 +02002770
Marek Vasutea9aa242016-04-04 21:21:05 +02002771 debug_cond(DLEVEL >= 1,
Marek Vasut28ea8272015-07-18 04:28:42 +02002772		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
Marek Vasut914546e2015-07-20 09:20:42 +02002773 __func__, __LINE__, rw_group, !ret);
Marek Vasut28ea8272015-07-18 04:28:42 +02002774
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002775 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut28ea8272015-07-18 04:28:42 +02002776 r += NUM_RANKS_PER_SHADOW_REG) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002777 scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
Marek Vasut28ea8272015-07-18 04:28:42 +02002778 writel(0, &sdr_scc_mgr->update);
2779 }
2780
Marek Vasut914546e2015-07-20 09:20:42 +02002781 return ret;
Marek Vasutf09da112015-07-18 02:57:32 +02002782}
2783
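/*
 * Worked example of the DQ in-delay spread above, assuming a
 * hypothetical io_in_delay_max of 31 and 8 DQ bits per DQS:
 * delay_step = 31 / 7 = 4, so bits p = test_bgn .. test_bgn + 7 get
 * in-delays 0, 4, 8, ..., 28, staggering DQ against DQS for the
 * DQS enable phase search. Sketch only, not compiled in.
 */
#if 0
static void spread_dq_in_delays(u32 test_bgn)
{
	const u32 delay_step = 31 / (8 - 1);
	u32 i, p, d;

	for (i = 0, p = test_bgn, d = 0; i < 8; i++, p++, d += delay_step)
		scc_mgr_set_dq_in_delay(p, d);
}
#endif
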
2784/**
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002785 * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
2786 * @rw_group: Read/Write Group
2787 * @test_bgn: Rank at which the test begins
2788 * @use_read_test: Perform a read test
2789 * @update_fom: Update FOM
2790 *
2791 * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
2792 * within a group.
2793 */
2794static int
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002795rw_mgr_mem_calibrate_dq_dqs_centering(struct socfpga_sdrseq *seq,
2796 const u32 rw_group, const u32 test_bgn,
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002797 const int use_read_test,
2798 const int update_fom)
2800{
2801 int ret, grp_calibrated;
2802 u32 rank_bgn, sr;
2803
2804 /*
2805 * Altera EMI_RM 2015.05.04 :: Figure 1-28
2806 * Read per-bit deskew can be done on a per shadow register basis.
2807 */
2808 grp_calibrated = 1;
2809 for (rank_bgn = 0, sr = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002810 rank_bgn < seq->rwcfg->mem_number_of_ranks;
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002811 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002812 ret = rw_mgr_mem_calibrate_vfifo_center(seq, rank_bgn, rw_group,
Marek Vasut0113c3e2015-07-18 20:42:27 +02002813 test_bgn,
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002814 use_read_test,
2815 update_fom);
Marek Vasut98668242015-07-18 20:44:28 +02002816 if (!ret)
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002817 continue;
2818
2819 grp_calibrated = 0;
2820 }
2821
2822 if (!grp_calibrated)
2823 return -EIO;
2824
2825 return 0;
2826}
2827
2828/**
Marek Vasutbce24ef2015-07-17 03:16:45 +02002829 * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
2830 * @rw_group: Read/Write Group
2831 * @test_bgn: Rank at which the test begins
Dinh Nguyen3da42852015-06-02 22:52:49 -05002832 *
Marek Vasutbce24ef2015-07-17 03:16:45 +02002833 * Stage 1: Calibrate the read valid prediction FIFO.
2834 *
2835 * This function implements UniPHY calibration Stage 1, as explained in
2836 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2837 *
2838 * - read valid prediction will consist of finding:
2839 * - DQS enable phase and DQS enable delay (DQS Enable Calibration)
2840 * - DQS input phase and DQS input delay (DQ/DQS Centering)
Dinh Nguyen3da42852015-06-02 22:52:49 -05002841 * - we also do a per-bit deskew on the DQ lines.
2842 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002843static int rw_mgr_mem_calibrate_vfifo(struct socfpga_sdrseq *seq,
2844 const u32 rw_group, const u32 test_bgn)
Dinh Nguyen3da42852015-06-02 22:52:49 -05002845{
Marek Vasut5ded7322015-08-02 19:42:26 +02002846 u32 p, d;
2847 u32 dtaps_per_ptap;
2848 u32 failed_substage;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002849
Marek Vasut04372fb2015-07-18 02:46:56 +02002850 int ret;
2851
Marek Vasutc336ca32015-07-17 04:24:18 +02002852 debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002853
Marek Vasut7c0a9df2015-07-18 03:15:34 +02002854 /* Update info for sims */
2855 reg_file_set_group(rw_group);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002856 reg_file_set_stage(CAL_STAGE_VFIFO);
Marek Vasut7c0a9df2015-07-18 03:15:34 +02002857 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002858
Marek Vasut7c0a9df2015-07-18 03:15:34 +02002859 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2860
2861 /* USER Determine number of delay taps for each phase tap. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002862 dtaps_per_ptap = DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
2863 seq->iocfg->delay_per_dqs_en_dchain_tap)
2864 - 1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002865
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002866 for (d = 0; d <= dtaps_per_ptap; d += 2) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05002867 /*
2868		 * In RLDRAMX we may be messing up the delay of pins in
Marek Vasutc336ca32015-07-17 04:24:18 +02002869		 * the same write rw_group but outside of the current read
2870		 * rw_group, but that's OK because we haven't calibrated the
Marek Vasutac70d2f2015-07-17 03:44:26 +02002871		 * output side yet.
Dinh Nguyen3da42852015-06-02 22:52:49 -05002872 */
2873 if (d > 0) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002874 scc_mgr_apply_group_all_out_delay_add_all_ranks(seq,
2875 rw_group,
2876 d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002877 }
2878
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002879 for (p = 0; p <= seq->iocfg->dqdqs_out_phase_max; p++) {
Marek Vasut04372fb2015-07-18 02:46:56 +02002880 /* 1) Guaranteed Write */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002881 ret = rw_mgr_mem_calibrate_guaranteed_write(seq,
2882 rw_group,
2883 p);
Marek Vasut04372fb2015-07-18 02:46:56 +02002884 if (ret)
2885 break;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002886
Marek Vasutf09da112015-07-18 02:57:32 +02002887 /* 2) DQS Enable Calibration */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002888 ret = rw_mgr_mem_calibrate_dqs_enable_calibration(seq,
2889 rw_group,
Marek Vasutf09da112015-07-18 02:57:32 +02002890 test_bgn);
2891 if (ret) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05002892 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002893 continue;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002894 }
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002895
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002896 /* 3) Centering DQ/DQS */
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002897 /*
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002898 * If doing read after write calibration, do not update
2899 * FOM now. Do it then.
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002900 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002901 ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq,
2902 rw_group,
2903 test_bgn,
2904 1, 0);
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002905 if (ret) {
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002906 failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002907 continue;
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002908 }
2909
Marek Vasut16cfc4b2015-07-18 03:10:31 +02002910 /* All done. */
2911 goto cal_done_ok;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002912 }
2913 }
2914
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002915 /* Calibration Stage 1 failed. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002916 set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO,
2917 failed_substage);
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002918 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002919
Marek Vasutfe2d0a22015-07-17 03:50:17 +02002920 /* Calibration Stage 1 completed OK. */
2921cal_done_ok:
Dinh Nguyen3da42852015-06-02 22:52:49 -05002922 /*
2923 * Reset the delay chains back to zero if they have moved > 1
2924	 * (check for > 1 because the loop will increase d even when it
2925	 * passes in the first case).
2926 */
2927 if (d > 2)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002928 scc_mgr_zero_group(seq, rw_group, 1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002929
2930 return 1;
2931}
2932
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002933/**
2934 * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
2935 * @rw_group: Read/Write Group
2936 * @test_bgn: Rank at which the test begins
2937 *
2938 * Stage 3: DQ/DQS Centering.
2939 *
2940 * This function implements UniPHY calibration Stage 3, as explained in
2941 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2942 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002943static int rw_mgr_mem_calibrate_vfifo_end(struct socfpga_sdrseq *seq,
2944 const u32 rw_group,
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002945 const u32 test_bgn)
Dinh Nguyen3da42852015-06-02 22:52:49 -05002946{
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002947 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002948
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002949 debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002950
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002951 /* Update info for sims. */
2952 reg_file_set_group(rw_group);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002953 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2954 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2955
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002956 ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, rw_group, test_bgn, 0,
2957 1);
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002958 if (ret)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002959 set_failing_group_stage(seq, rw_group,
Dinh Nguyen3da42852015-06-02 22:52:49 -05002960 CAL_STAGE_VFIFO_AFTER_WRITES,
2961 CAL_SUBSTAGE_VFIFO_CENTER);
Marek Vasut78cdd7d2015-07-18 05:58:44 +02002962 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002963}
2964
Marek Vasutc9842782015-07-21 06:18:57 +02002965/**
2966 * rw_mgr_mem_calibrate_lfifo() - Minimize latency
2967 *
2968 * Stage 4: Minimize latency.
2969 *
2970 * This function implements UniPHY calibration Stage 4, as explained in
2971 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
2972 * Calibrate LFIFO to find smallest read latency.
2973 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002974static u32 rw_mgr_mem_calibrate_lfifo(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05002975{
Marek Vasutc9842782015-07-21 06:18:57 +02002976 int found_one = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002977
2978 debug("%s:%d\n", __func__, __LINE__);
2979
Marek Vasutc9842782015-07-21 06:18:57 +02002980 /* Update info for sims. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002981 reg_file_set_stage(CAL_STAGE_LFIFO);
2982 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2983
2984 /* Load up the patterns used by read calibration for all ranks */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002985 rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002986
Dinh Nguyen3da42852015-06-02 22:52:49 -05002987 do {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002988 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Marek Vasutea9aa242016-04-04 21:21:05 +02002989 debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002990 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002991
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02002992 if (!rw_mgr_mem_calibrate_read_test_all_ranks(seq, 0,
2993 NUM_READ_TESTS,
Marek Vasutc9842782015-07-21 06:18:57 +02002994 PASS_ALL_BITS, 1))
Dinh Nguyen3da42852015-06-02 22:52:49 -05002995 break;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002996
2997 found_one = 1;
Marek Vasutc9842782015-07-21 06:18:57 +02002998 /*
2999 * Reduce read latency and see if things are
3000 * working correctly.
3001 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003002 seq->gbl.curr_read_lat--;
3003 } while (seq->gbl.curr_read_lat > 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003004
Marek Vasutc9842782015-07-21 06:18:57 +02003005 /* Reset the fifos to get pointers to known state. */
Marek Vasut1273dd92015-07-12 21:05:08 +02003006 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003007
3008 if (found_one) {
Marek Vasutc9842782015-07-21 06:18:57 +02003009 /* Add a fudge factor to the read latency that was determined */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003010 seq->gbl.curr_read_lat += 2;
3011 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Marek Vasutea9aa242016-04-04 21:21:05 +02003012 debug_cond(DLEVEL >= 2,
Marek Vasutc9842782015-07-21 06:18:57 +02003013 "%s:%d lfifo: success: using read_lat=%u\n",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003014 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003015 } else {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003016 set_failing_group_stage(seq, 0xff, CAL_STAGE_LFIFO,
Dinh Nguyen3da42852015-06-02 22:52:49 -05003017 CAL_SUBSTAGE_READ_LATENCY);
3018
Marek Vasutea9aa242016-04-04 21:21:05 +02003019 debug_cond(DLEVEL >= 2,
Marek Vasutc9842782015-07-21 06:18:57 +02003020 "%s:%d lfifo: failed at initial read_lat=%u\n",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003021 __func__, __LINE__, seq->gbl.curr_read_lat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003022 }
Marek Vasutc9842782015-07-21 06:18:57 +02003023
3024 return found_one;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003025}
3026
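/*
 * Worked example of the latency walk above: with reads passing down to
 * curr_read_lat = 5 and failing at 4, the loop exits at 4 and the +2
 * fudge yields a final read latency of 6, one AFI clock above the
 * lowest passing value. The model below is a sketch only;
 * read_test_passes() is a hypothetical stand-in for
 * rw_mgr_mem_calibrate_read_test_all_ranks(), and the real code applies
 * the fudge only if at least one latency passed.
 */
#if 0
static u32 lfifo_walk_model(u32 start_lat)
{
	u32 lat = start_lat;

	do {
		if (!read_test_passes(lat))	/* hypothetical helper */
			break;
		lat--;
	} while (lat > 0);

	return lat + 2;	/* fudge factor over the failing latency */
}
#endif
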
Marek Vasutc8570af2015-07-21 05:26:58 +02003027/**
3028 * search_window() - Search for the/part of the window with DM/DQS shift
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003029 * @search_dm: If 1, search for the DM shift, if 0, search for DQS
3030 * shift
Marek Vasutc8570af2015-07-21 05:26:58 +02003031 * @rank_bgn: Rank number
3032 * @write_group: Write Group
3033 * @bgn_curr: Current window begin
3034 * @end_curr: Current window end
3035 * @bgn_best: Current best window begin
3036 * @end_best: Current best window end
3037 * @win_best: Size of the best window
3038 * @new_dqs: New DQS value (only applicable if search_dm = 0).
3039 *
3040 * Search for the/part of the window with DM/DQS shift.
3041 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003042static void search_window(struct socfpga_sdrseq *seq,
3043 const int search_dm, const u32 rank_bgn,
3044 const u32 write_group, int *bgn_curr, int *end_curr,
3045 int *bgn_best, int *end_best, int *win_best,
3046 int new_dqs)
Marek Vasutc8570af2015-07-21 05:26:58 +02003047{
3048 u32 bit_chk;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003049 const int max = seq->iocfg->io_out1_delay_max - new_dqs;
Marek Vasutc8570af2015-07-21 05:26:58 +02003050 int d, di;
3051
3052 /* Search for the/part of the window with DM/DQS shift. */
3053 for (di = max; di >= 0; di -= DELTA_D) {
3054 if (search_dm) {
3055 d = di;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003056 scc_mgr_apply_group_dm_out1_delay(seq, d);
Marek Vasutc8570af2015-07-21 05:26:58 +02003057 } else {
3058 /* For DQS, we go from 0...max */
3059 d = max - di;
3060 /*
Marek Vasut139823e2015-08-02 19:47:01 +02003061 * Note: This only shifts DQS, so are we limiting
3062 * ourselves to width of DQ unnecessarily.
Marek Vasutc8570af2015-07-21 05:26:58 +02003063 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003064 scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
3065 write_group,
Marek Vasutc8570af2015-07-21 05:26:58 +02003066 d + new_dqs);
3067 }
3068
3069 writel(0, &sdr_scc_mgr->update);
3070
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003071 if (rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group,
3072 1, PASS_ALL_BITS, &bit_chk,
Marek Vasutc8570af2015-07-21 05:26:58 +02003073 0)) {
3074 /* Set current end of the window. */
3075 *end_curr = search_dm ? -d : d;
3076
3077 /*
3078 * If a starting edge of our window has not been seen
3079 * this is our current start of the DM window.
3080 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003081 if (*bgn_curr == seq->iocfg->io_out1_delay_max + 1)
Marek Vasutc8570af2015-07-21 05:26:58 +02003082 *bgn_curr = search_dm ? -d : d;
3083
3084 /*
3085 * If current window is bigger than best seen.
3086 * Set best seen to be current window.
3087 */
3088 if ((*end_curr - *bgn_curr + 1) > *win_best) {
3089 *win_best = *end_curr - *bgn_curr + 1;
3090 *bgn_best = *bgn_curr;
3091 *end_best = *end_curr;
3092 }
3093 } else {
3094 /* We just saw a failing test. Reset temp edge. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003095 *bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3096 *end_curr = seq->iocfg->io_out1_delay_max + 1;
Marek Vasutc8570af2015-07-21 05:26:58 +02003097
3098 /* Early exit is only applicable to DQS. */
3099 if (search_dm)
3100 continue;
3101
3102 /*
3103 * Early exit optimization: if the remaining delay
3104			 * chain space is less than the largest window
3105			 * already seen, we can exit.
3106 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003107 if (*win_best - 1 > seq->iocfg->io_out1_delay_max
3108 - new_dqs - d)
Marek Vasutc8570af2015-07-21 05:26:58 +02003109 break;
3110 }
3111 }
3112}
3113
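/*
 * The window bookkeeping above reduces to a simple pattern per delay
 * step: a pass extends (or opens) the current window and updates the
 * best one seen; a fail resets the current window. A single-direction
 * model, with "illegal" standing in for io_out1_delay_max + 1; sketch
 * only, not compiled in.
 */
#if 0
static void track_window(int d, int pass, int *bgn_curr, int *end_curr,
			 int *bgn_best, int *end_best, int *win_best,
			 int illegal)
{
	if (pass) {
		*end_curr = d;
		if (*bgn_curr == illegal)
			*bgn_curr = d;	/* opening edge of a new window */
		if (*end_curr - *bgn_curr + 1 > *win_best) {
			*win_best = *end_curr - *bgn_curr + 1;
			*bgn_best = *bgn_curr;
			*end_best = *end_curr;
		}
	} else {
		*bgn_curr = illegal;	/* failing test, reset temp edges */
		*end_curr = illegal;
	}
}
#endif
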
Dinh Nguyen3da42852015-06-02 22:52:49 -05003114/**
Marek Vasuta386a502015-07-21 05:33:49 +02003115 * rw_mgr_mem_calibrate_writes_center() - Center all windows
3116 * @rank_bgn: Rank number
3117 * @write_group: Write group
3118 * @test_bgn: Rank at which the test begins
3119 *
3120 * Center all windows. Do per-bit-deskew to possibly increase size of
Dinh Nguyen3da42852015-06-02 22:52:49 -05003121 * certain windows.
3122 */
Marek Vasut3b44f552015-07-21 05:00:42 +02003123static int
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003124rw_mgr_mem_calibrate_writes_center(struct socfpga_sdrseq *seq,
3125 const u32 rank_bgn, const u32 write_group,
Marek Vasut3b44f552015-07-21 05:00:42 +02003126 const u32 test_bgn)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003127{
Marek Vasutc8570af2015-07-21 05:26:58 +02003128 int i;
Marek Vasut3b44f552015-07-21 05:00:42 +02003129 u32 sticky_bit_chk;
3130 u32 min_index;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003131 int left_edge[seq->rwcfg->mem_dq_per_write_dqs];
3132 int right_edge[seq->rwcfg->mem_dq_per_write_dqs];
Marek Vasut3b44f552015-07-21 05:00:42 +02003133 int mid;
3134 int mid_min, orig_mid_min;
3135 int new_dqs, start_dqs;
3136 int dq_margin, dqs_margin, dm_margin;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003137 int bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3138 int end_curr = seq->iocfg->io_out1_delay_max + 1;
3139 int bgn_best = seq->iocfg->io_out1_delay_max + 1;
3140 int end_best = seq->iocfg->io_out1_delay_max + 1;
Marek Vasut3b44f552015-07-21 05:00:42 +02003141 int win_best = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003142
Marek Vasutc4907892015-07-13 02:11:02 +02003143 int ret;
3144
Dinh Nguyen3da42852015-06-02 22:52:49 -05003145 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
3146
3147 dm_margin = 0;
3148
Marek Vasutc6540872015-07-21 05:29:05 +02003149 start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
3150 SCC_MGR_IO_OUT1_DELAY_OFFSET) +
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003151 (seq->rwcfg->mem_dq_per_write_dqs << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05003152
Marek Vasut3b44f552015-07-21 05:00:42 +02003153 /* Per-bit deskew. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003154
3155 /*
Marek Vasut3b44f552015-07-21 05:00:42 +02003156 * Set the left and right edge of each bit to an illegal value.
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003157 * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value.
Dinh Nguyen3da42852015-06-02 22:52:49 -05003158 */
3159 sticky_bit_chk = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003160 for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
3161 left_edge[i] = seq->iocfg->io_out1_delay_max + 1;
3162 right_edge[i] = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003163 }
3164
Marek Vasut3b44f552015-07-21 05:00:42 +02003165 /* Search for the left edge of the window for each bit. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003166 search_left_edge(seq, 1, rank_bgn, write_group, 0, test_bgn,
Marek Vasut0c4be192015-07-18 20:34:00 +02003167 &sticky_bit_chk,
Marek Vasut71120772015-07-13 02:38:15 +02003168 left_edge, right_edge, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003169
Marek Vasut3b44f552015-07-21 05:00:42 +02003170 /* Search for the right edge of the window for each bit. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003171 ret = search_right_edge(seq, 1, rank_bgn, write_group, 0,
Marek Vasutc4907892015-07-13 02:11:02 +02003172 start_dqs, 0,
Marek Vasut0c4be192015-07-18 20:34:00 +02003173 &sticky_bit_chk,
Marek Vasutc4907892015-07-13 02:11:02 +02003174 left_edge, right_edge, 0);
3175 if (ret) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003176 set_failing_group_stage(seq, test_bgn + ret - 1,
3177 CAL_STAGE_WRITES,
Marek Vasutc4907892015-07-13 02:11:02 +02003178 CAL_SUBSTAGE_WRITES_CENTER);
Marek Vasutd043ee52015-07-21 05:32:49 +02003179 return -EINVAL;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003180 }
3181
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003182 min_index = get_window_mid_index(seq, 1, left_edge, right_edge,
3183 &mid_min);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003184
Marek Vasut3b44f552015-07-21 05:00:42 +02003185 /* Determine the amount we can change DQS (which is -mid_min). */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003186 orig_mid_min = mid_min;
3187 new_dqs = start_dqs;
3188 mid_min = 0;
Marek Vasutea9aa242016-04-04 21:21:05 +02003189 debug_cond(DLEVEL >= 1,
Marek Vasut3b44f552015-07-21 05:00:42 +02003190 "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
3191 __func__, __LINE__, start_dqs, new_dqs, mid_min);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003192
Marek Vasutffb8b662015-07-18 19:46:26 +02003193 /* Add delay to bring centre of all DQ windows to the same "level". */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003194 center_dq_windows(seq, 1, left_edge, right_edge, mid_min, orig_mid_min,
Marek Vasutffb8b662015-07-18 19:46:26 +02003195 min_index, 0, &dq_margin, &dqs_margin);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003196
3197 /* Move DQS */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003198 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
Marek Vasut1273dd92015-07-12 21:05:08 +02003199 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003200
3201 /* Centre DM */
Marek Vasutea9aa242016-04-04 21:21:05 +02003202 debug_cond(DLEVEL >= 2, "%s:%d write_center: DM\n", __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003203
3204 /*
Marek Vasut3b44f552015-07-21 05:00:42 +02003205 * Set the left and right edge of each bit to an illegal value.
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003206 * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value.
Dinh Nguyen3da42852015-06-02 22:52:49 -05003207 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003208 left_edge[0] = seq->iocfg->io_out1_delay_max + 1;
3209 right_edge[0] = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003210
Marek Vasut3b44f552015-07-21 05:00:42 +02003211 /* Search for the/part of the window with DM shift. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003212 search_window(seq, 1, rank_bgn, write_group, &bgn_curr, &end_curr,
Marek Vasutc8570af2015-07-21 05:26:58 +02003213 &bgn_best, &end_best, &win_best, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003214
Marek Vasut3b44f552015-07-21 05:00:42 +02003215 /* Reset DM delay chains to 0. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003216 scc_mgr_apply_group_dm_out1_delay(seq, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003217
3218 /*
3219	 * Check to see if the current window nudges up against 0 delay.
3220	 * If so, we need to continue the search by shifting DQS; otherwise
Marek Vasut3b44f552015-07-21 05:00:42 +02003221	 * the DQS search begins as a new search.
3222 */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003223 if (end_curr != 0) {
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003224 bgn_curr = seq->iocfg->io_out1_delay_max + 1;
3225 end_curr = seq->iocfg->io_out1_delay_max + 1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003226 }
3227
Marek Vasut3b44f552015-07-21 05:00:42 +02003228 /* Search for the/part of the window with DQS shifts. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003229 search_window(seq, 0, rank_bgn, write_group, &bgn_curr, &end_curr,
Marek Vasutc8570af2015-07-21 05:26:58 +02003230 &bgn_best, &end_best, &win_best, new_dqs);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003231
Marek Vasut3b44f552015-07-21 05:00:42 +02003232 /* Assign left and right edge for cal and reporting. */
3233 left_edge[0] = -1 * bgn_best;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003234 right_edge[0] = end_best;
3235
Marek Vasutea9aa242016-04-04 21:21:05 +02003236 debug_cond(DLEVEL >= 2, "%s:%d dm_calib: left=%d right=%d\n",
Marek Vasut3b44f552015-07-21 05:00:42 +02003237 __func__, __LINE__, left_edge[0], right_edge[0]);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003238
Marek Vasut3b44f552015-07-21 05:00:42 +02003239 /* Move DQS (back to orig). */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003240 scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003241
3242 /* Move DM */
3243
Marek Vasut3b44f552015-07-21 05:00:42 +02003244 /* Find middle of window for the DM bit. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003245 mid = (left_edge[0] - right_edge[0]) / 2;
3246
Marek Vasut3b44f552015-07-21 05:00:42 +02003247 /* Only move right, since we are not moving DQS/DQ. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003248 if (mid < 0)
3249 mid = 0;
3250
Marek Vasut3b44f552015-07-21 05:00:42 +02003251 /* dm_marign should fail if we never find a window. */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003252 if (win_best == 0)
3253 dm_margin = -1;
3254 else
3255 dm_margin = left_edge[0] - mid;
3256
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003257 scc_mgr_apply_group_dm_out1_delay(seq, mid);
Marek Vasut1273dd92015-07-12 21:05:08 +02003258 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003259
Marek Vasutea9aa242016-04-04 21:21:05 +02003260 debug_cond(DLEVEL >= 2,
Marek Vasut3b44f552015-07-21 05:00:42 +02003261 "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
3262 __func__, __LINE__, left_edge[0], right_edge[0],
3263 mid, dm_margin);
3264 /* Export values. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003265 seq->gbl.fom_out += dq_margin + dqs_margin;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003266
Marek Vasutea9aa242016-04-04 21:21:05 +02003267 debug_cond(DLEVEL >= 2,
Marek Vasut3b44f552015-07-21 05:00:42 +02003268 "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
3269 __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003270
3271 /*
3272 * Do not remove this line as it makes sure all of our
3273 * decisions have been applied.
3274 */
Marek Vasut1273dd92015-07-12 21:05:08 +02003275 writel(0, &sdr_scc_mgr->update);
Marek Vasut3b44f552015-07-21 05:00:42 +02003276
Marek Vasutd043ee52015-07-21 05:32:49 +02003277 if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
3278 return -EINVAL;
3279
3280 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003281}
3282
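/*
 * Worked example of the DM centering above, with a hypothetical best
 * window of bgn_best = -4, end_best = 10 (win_best = 15): left_edge[0]
 * becomes 4, right_edge[0] becomes 10, mid = (4 - 10) / 2 = -3 and is
 * clamped to 0 since only rightward moves are allowed, so
 * dm_margin = 4 - 0 = 4. Sketch only, not compiled in.
 */
#if 0
static int dm_margin_example(void)
{
	int bgn_best = -4, end_best = 10, win_best = 15;
	int left_edge = -bgn_best;		/* 4 */
	int right_edge = end_best;		/* 10 */
	int mid = (left_edge - right_edge) / 2;	/* -3 */

	if (mid < 0)
		mid = 0;	/* only move right; DQ/DQS stay put */

	return win_best ? left_edge - mid : -1;	/* dm_margin = 4 */
}
#endif
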
Marek Vasutdb3a6062015-07-18 07:23:25 +02003283/**
3284 * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
3285 * @rank_bgn: Rank number
3286 * @group: Read/Write Group
3287 * @test_bgn: Rank at which the test begins
3288 *
3289 * Stage 2: Write Calibration Part One.
3290 *
3291 * This function implements UniPHY calibration Stage 2, as explained in
3292 * detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
3293 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003294static int rw_mgr_mem_calibrate_writes(struct socfpga_sdrseq *seq,
3295 const u32 rank_bgn, const u32 group,
Marek Vasutdb3a6062015-07-18 07:23:25 +02003296 const u32 test_bgn)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003297{
Marek Vasutdb3a6062015-07-18 07:23:25 +02003298 int ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003299
Marek Vasutdb3a6062015-07-18 07:23:25 +02003300 /* Update info for sims */
3301 debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
3302
3303 reg_file_set_group(group);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003304 reg_file_set_stage(CAL_STAGE_WRITES);
3305 reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
3306
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003307 ret = rw_mgr_mem_calibrate_writes_center(seq, rank_bgn, group,
3308 test_bgn);
Marek Vasutd043ee52015-07-21 05:32:49 +02003309 if (ret)
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003310 set_failing_group_stage(seq, group, CAL_STAGE_WRITES,
Dinh Nguyen3da42852015-06-02 22:52:49 -05003311 CAL_SUBSTAGE_WRITES_CENTER);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003312
Marek Vasutd043ee52015-07-21 05:32:49 +02003313 return ret;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003314}
3315
Marek Vasut4b0ac262015-07-20 07:33:33 +02003316/**
3317 * mem_precharge_and_activate() - Precharge all banks and activate
3318 *
3319 * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
3320 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003321static void mem_precharge_and_activate(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003322{
Marek Vasut4b0ac262015-07-20 07:33:33 +02003323 int r;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003324
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003325 for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
Marek Vasut4b0ac262015-07-20 07:33:33 +02003326 /* Set rank. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003327 set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003328
Marek Vasut4b0ac262015-07-20 07:33:33 +02003329 /* Precharge all banks. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003330 writel(seq->rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
Marek Vasut1273dd92015-07-12 21:05:08 +02003331 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003332
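		/*
		 * Load loop counters 0 and 1 with the wait counts used by
		 * the activate sequence below; 0x0F is assumed to request
		 * 15 iterations of the corresponding wait instruction.
		 */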
Marek Vasut1273dd92015-07-12 21:05:08 +02003333 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003334 writel(seq->rwcfg->activate_0_and_1_wait1,
Marek Vasut139823e2015-08-02 19:47:01 +02003335 &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003336
Marek Vasut1273dd92015-07-12 21:05:08 +02003337 writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003338 writel(seq->rwcfg->activate_0_and_1_wait2,
Marek Vasut139823e2015-08-02 19:47:01 +02003339 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003340
Marek Vasut4b0ac262015-07-20 07:33:33 +02003341 /* Activate rows. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003342 writel(seq->rwcfg->activate_0_and_1,
3343 SDR_PHYGRP_RWMGRGRP_ADDRESS |
3344 RW_MGR_RUN_SINGLE_GROUP_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003345 }
3346}
3347
Marek Vasut16502a02015-07-17 01:57:41 +02003348/**
3349 * mem_init_latency() - Configure memory RLAT and WLAT settings
3350 *
3351 * Configure memory RLAT and WLAT parameters.
3352 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003353static void mem_init_latency(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003354{
Marek Vasut16502a02015-07-17 01:57:41 +02003355 /*
3356 * For AV/CV, LFIFO is hardened and always runs at full rate
3357 * so max latency in AFI clocks, used here, is correspondingly
3358 * smaller.
3359 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003360 const u32 max_latency = (1 << seq->misccfg->max_latency_count_width)
3361 - 1;
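	/*
	 * Worked example (assumed width): with max_latency_count_width = 5,
	 * max_latency = (1 << 5) - 1 = 31 AFI clocks.
	 */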
Marek Vasut16502a02015-07-17 01:57:41 +02003362 u32 rlat, wlat;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003363
3364 debug("%s:%d\n", __func__, __LINE__);
Marek Vasut16502a02015-07-17 01:57:41 +02003365
3366 /*
3367 * Read in write latency.
3368 * WL for Hard PHY does not include additive latency.
3369 */
Marek Vasut1273dd92015-07-12 21:05:08 +02003370 wlat = readl(&data_mgr->t_wl_add);
3371 wlat += readl(&data_mgr->mem_t_add);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003372
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003373 seq->gbl.rw_wl_nop_cycles = wlat - 1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003374
Marek Vasut16502a02015-07-17 01:57:41 +02003375	/* Read in read latency. */
Marek Vasut1273dd92015-07-12 21:05:08 +02003376 rlat = readl(&data_mgr->t_rl_add);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003377
Marek Vasut16502a02015-07-17 01:57:41 +02003378 /* Set a pretty high read latency initially. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003379 seq->gbl.curr_read_lat = rlat + 16;
3380 if (seq->gbl.curr_read_lat > max_latency)
3381 seq->gbl.curr_read_lat = max_latency;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003382
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003383 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003384
Marek Vasut16502a02015-07-17 01:57:41 +02003385 /* Advertise write latency. */
3386 writel(wlat, &phy_mgr_cfg->afi_wlat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003387}
3388
Marek Vasut51cea0b2015-07-26 10:54:15 +02003389/**
3390 * @mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings
3391 *
3392 * Set VFIFO and LFIFO to instant-on settings in skip calibration mode.
3393 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003394static void mem_skip_calibrate(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003395{
Marek Vasut5ded7322015-08-02 19:42:26 +02003396 u32 vfifo_offset;
3397 u32 i, j, r;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003398
3399 debug("%s:%d\n", __func__, __LINE__);
3400 /* Need to update every shadow register set used by the interface */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003401 for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
Marek Vasut51cea0b2015-07-26 10:54:15 +02003402 r += NUM_RANKS_PER_SHADOW_REG) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05003403 /*
3404 * Set output phase alignment settings appropriate for
3405 * skip calibration.
3406 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003407 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05003408 scc_mgr_set_dqs_en_phase(i, 0);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003409 if (seq->iocfg->dll_chain_length == 6)
Marek Vasut160695d2015-08-02 19:10:58 +02003410 scc_mgr_set_dqdqs_output_phase(i, 6);
3411 else
3412 scc_mgr_set_dqdqs_output_phase(i, 7);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003413 /*
3414 * Case:33398
3415 *
3416 * Write data arrives to the I/O two cycles before write
3417 * latency is reached (720 deg).
3418 * -> due to bit-slip in a/c bus
3419 * -> to allow board skew where dqs is longer than ck
3420 * -> how often can this happen!?
3421 * -> can claim back some ptaps for high freq
	3422			 * support if we can relax this, but I digress...
3423 *
3424 * The write_clk leads mem_ck by 90 deg
3425 * The minimum ptap of the OPA is 180 deg
	3426			 * Each ptap has (360 / seq->iocfg->dll_chain_length) deg of delay
3427 * The write_clk is always delayed by 2 ptaps
3428 *
3429 * Hence, to make DQS aligned to CK, we need to delay
3430 * DQS by:
Marek Vasut139823e2015-08-02 19:47:01 +02003431 * (720 - 90 - 180 - 2) *
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003432 * (360 / seq->iocfg->dll_chain_length)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003433 *
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003434 * Dividing the above by
	3435			 * (360 / seq->iocfg->dll_chain_length)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003436			 * gives us the number of ptaps, which simplifies to:
3437 *
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003438 * (1.25 * seq->iocfg->dll_chain_length - 2)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003439 */
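			/*
			 * Worked example (assumed chain length): with
			 * seq->iocfg->dll_chain_length = 8, the integer
			 * arithmetic below yields (125 * 8) / 100 - 2 = 8
			 * ptaps of DQS output delay.
			 */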
Marek Vasut51cea0b2015-07-26 10:54:15 +02003440 scc_mgr_set_dqdqs_output_phase(i,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003441 ((125 * seq->iocfg->dll_chain_length)
3442 / 100) - 2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003443 }
Marek Vasut1273dd92015-07-12 21:05:08 +02003444 writel(0xff, &sdr_scc_mgr->dqs_ena);
3445 writel(0xff, &sdr_scc_mgr->dqs_io_ena);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003446
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003447 for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) {
Marek Vasut1273dd92015-07-12 21:05:08 +02003448 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3449 SCC_MGR_GROUP_COUNTER_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003450 }
Marek Vasut1273dd92015-07-12 21:05:08 +02003451 writel(0xff, &sdr_scc_mgr->dq_ena);
3452 writel(0xff, &sdr_scc_mgr->dm_ena);
3453 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003454 }
3455
3456 /* Compensate for simulation model behaviour */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003457 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05003458 scc_mgr_set_dqs_bus_in_delay(i, 10);
3459 scc_mgr_load_dqs(i);
3460 }
Marek Vasut1273dd92015-07-12 21:05:08 +02003461 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003462
3463 /*
3464 * ArriaV has hard FIFOs that can only be initialized by incrementing
3465 * in sequencer.
3466 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003467 vfifo_offset = seq->misccfg->calib_vfifo_offset;
Marek Vasut51cea0b2015-07-26 10:54:15 +02003468 for (j = 0; j < vfifo_offset; j++)
Marek Vasut1273dd92015-07-12 21:05:08 +02003469 writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
Marek Vasut1273dd92015-07-12 21:05:08 +02003470 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003471
3472 /*
Marek Vasut51cea0b2015-07-26 10:54:15 +02003473 * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal
	3474	 * setting from a generation-time constant.
Dinh Nguyen3da42852015-06-02 22:52:49 -05003475 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003476 seq->gbl.curr_read_lat = seq->misccfg->calib_lfifo_offset;
3477 writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003478}
3479
Marek Vasut3589fbf2015-07-20 04:34:51 +02003480/**
3481 * mem_calibrate() - Memory calibration entry point.
3482 *
3483 * Perform memory calibration.
3484 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003485static u32 mem_calibrate(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003486{
Marek Vasut5ded7322015-08-02 19:42:26 +02003487 u32 i;
3488 u32 rank_bgn, sr;
3489 u32 write_group, write_test_bgn;
3490 u32 read_group, read_test_bgn;
3491 u32 run_groups, current_run;
3492 u32 failing_groups = 0;
3493 u32 group_failed = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003494
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003495 const u32 rwdqs_ratio = seq->rwcfg->mem_if_read_dqs_width /
3496 seq->rwcfg->mem_if_write_dqs_width;
Marek Vasut33c42bb2015-07-17 02:21:47 +02003497
Dinh Nguyen3da42852015-06-02 22:52:49 -05003498 debug("%s:%d\n", __func__, __LINE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003499
Marek Vasut16502a02015-07-17 01:57:41 +02003500 /* Initialize the data settings */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003501 seq->gbl.error_substage = CAL_SUBSTAGE_NIL;
3502 seq->gbl.error_stage = CAL_STAGE_NIL;
3503 seq->gbl.error_group = 0xff;
3504 seq->gbl.fom_in = 0;
3505 seq->gbl.fom_out = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003506
Marek Vasut16502a02015-07-17 01:57:41 +02003507 /* Initialize WLAT and RLAT. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003508 mem_init_latency(seq);
Marek Vasut16502a02015-07-17 01:57:41 +02003509
3510 /* Initialize bit slips. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003511 mem_precharge_and_activate(seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003512
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003513 for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) {
Marek Vasut1273dd92015-07-12 21:05:08 +02003514 writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
3515 SCC_MGR_GROUP_COUNTER_OFFSET);
Marek Vasutfa5d8212015-07-19 01:34:43 +02003516 /* Only needed once to set all groups, pins, DQ, DQS, DM. */
3517 if (i == 0)
3518 scc_mgr_set_hhp_extras();
3519
Marek Vasutc5c5f532015-07-17 02:06:20 +02003520 scc_set_bypass_mode(i);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003521 }
3522
Marek Vasut722c9682015-07-17 02:07:12 +02003523 /* Calibration is skipped. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003524 if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
Dinh Nguyen3da42852015-06-02 22:52:49 -05003525 /*
3526 * Set VFIFO and LFIFO to instant-on settings in skip
3527 * calibration mode.
3528 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003529 mem_skip_calibrate(seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003530
Marek Vasut722c9682015-07-17 02:07:12 +02003531 /*
3532 * Do not remove this line as it makes sure all of our
3533 * decisions have been applied.
3534 */
3535 writel(0, &sdr_scc_mgr->update);
3536 return 1;
3537 }
Dinh Nguyen3da42852015-06-02 22:52:49 -05003538
Marek Vasut722c9682015-07-17 02:07:12 +02003539 /* Calibration is not skipped. */
3540 for (i = 0; i < NUM_CALIB_REPEAT; i++) {
3541 /*
3542 * Zero all delay chain/phase settings for all
3543 * groups and all shadow register sets.
3544 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003545 scc_mgr_zero_all(seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003546
Marek Vasutf085ac32015-08-02 18:27:21 +02003547 run_groups = ~0;
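		/*
		 * run_groups is a bitmask with one bit per write group still
		 * to be calibrated; RW_MGR_NUM_DQS_PER_WRITE_GROUP bits are
		 * peeled off per write group in the loop below.
		 */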
Dinh Nguyen3da42852015-06-02 22:52:49 -05003548
Marek Vasut722c9682015-07-17 02:07:12 +02003549 for (write_group = 0, write_test_bgn = 0; write_group
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003550 < seq->rwcfg->mem_if_write_dqs_width; write_group++,
3551 write_test_bgn += seq->rwcfg->mem_dq_per_write_dqs) {
Marek Vasutc452dcd2015-07-17 02:50:56 +02003552 /* Initialize the group failure */
Marek Vasut722c9682015-07-17 02:07:12 +02003553 group_failed = 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003554
Marek Vasut722c9682015-07-17 02:07:12 +02003555 current_run = run_groups & ((1 <<
3556 RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
3557 run_groups = run_groups >>
3558 RW_MGR_NUM_DQS_PER_WRITE_GROUP;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003559
Marek Vasut722c9682015-07-17 02:07:12 +02003560 if (current_run == 0)
3561 continue;
3562
3563 writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
3564 SCC_MGR_GROUP_COUNTER_OFFSET);
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003565 scc_mgr_zero_group(seq, write_group, 0);
Marek Vasut722c9682015-07-17 02:07:12 +02003566
Marek Vasut33c42bb2015-07-17 02:21:47 +02003567 for (read_group = write_group * rwdqs_ratio,
3568 read_test_bgn = 0;
Marek Vasutc452dcd2015-07-17 02:50:56 +02003569 read_group < (write_group + 1) * rwdqs_ratio;
Marek Vasut33c42bb2015-07-17 02:21:47 +02003570 read_group++,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003571 read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
Marek Vasut33c42bb2015-07-17 02:21:47 +02003572 if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO)
3573 continue;
Marek Vasut722c9682015-07-17 02:07:12 +02003574
Marek Vasut33c42bb2015-07-17 02:21:47 +02003575 /* Calibrate the VFIFO */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003576 if (rw_mgr_mem_calibrate_vfifo(seq, read_group,
Marek Vasut33c42bb2015-07-17 02:21:47 +02003577 read_test_bgn))
3578 continue;
3579
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003580 if (!(seq->gbl.phy_debug_mode_flags &
Marek Vasut139823e2015-08-02 19:47:01 +02003581 PHY_DEBUG_SWEEP_ALL_GROUPS))
Marek Vasutc452dcd2015-07-17 02:50:56 +02003582 return 0;
3583
3584 /* The group failed, we're done. */
3585 goto grp_failed;
3586 }
3587
3588 /* Calibrate the output side */
3589 for (rank_bgn = 0, sr = 0;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003590 rank_bgn < seq->rwcfg->mem_number_of_ranks;
Marek Vasutc452dcd2015-07-17 02:50:56 +02003591 rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
3592 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3593 continue;
3594
3595 /* Not needed in quick mode! */
Marek Vasut139823e2015-08-02 19:47:01 +02003596 if (STATIC_CALIB_STEPS &
3597 CALIB_SKIP_DELAY_SWEEPS)
Marek Vasutc452dcd2015-07-17 02:50:56 +02003598 continue;
3599
Marek Vasutc452dcd2015-07-17 02:50:56 +02003600 /* Calibrate WRITEs */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003601 if (!rw_mgr_mem_calibrate_writes(seq, rank_bgn,
Marek Vasut139823e2015-08-02 19:47:01 +02003602 write_group,
3603 write_test_bgn))
Marek Vasutc452dcd2015-07-17 02:50:56 +02003604 continue;
3605
Marek Vasut33c42bb2015-07-17 02:21:47 +02003606 group_failed = 1;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003607 if (!(seq->gbl.phy_debug_mode_flags &
Marek Vasut139823e2015-08-02 19:47:01 +02003608 PHY_DEBUG_SWEEP_ALL_GROUPS))
Marek Vasut33c42bb2015-07-17 02:21:47 +02003609 return 0;
Marek Vasut722c9682015-07-17 02:07:12 +02003610 }
3611
Marek Vasutc452dcd2015-07-17 02:50:56 +02003612 /* Some group failed, we're done. */
3613 if (group_failed)
3614 goto grp_failed;
Marek Vasut4ac21612015-07-17 02:31:04 +02003615
Marek Vasutc452dcd2015-07-17 02:50:56 +02003616 for (read_group = write_group * rwdqs_ratio,
3617 read_test_bgn = 0;
3618 read_group < (write_group + 1) * rwdqs_ratio;
3619 read_group++,
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003620 read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) {
Marek Vasutc452dcd2015-07-17 02:50:56 +02003621 if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES)
3622 continue;
Marek Vasut4ac21612015-07-17 02:31:04 +02003623
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003624 if (!rw_mgr_mem_calibrate_vfifo_end(seq,
3625 read_group,
Marek Vasut139823e2015-08-02 19:47:01 +02003626 read_test_bgn))
Marek Vasutc452dcd2015-07-17 02:50:56 +02003627 continue;
Marek Vasut4ac21612015-07-17 02:31:04 +02003628
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003629 if (!(seq->gbl.phy_debug_mode_flags &
Marek Vasut139823e2015-08-02 19:47:01 +02003630 PHY_DEBUG_SWEEP_ALL_GROUPS))
Marek Vasutc452dcd2015-07-17 02:50:56 +02003631 return 0;
Marek Vasut4ac21612015-07-17 02:31:04 +02003632
Marek Vasutc452dcd2015-07-17 02:50:56 +02003633 /* The group failed, we're done. */
3634 goto grp_failed;
Marek Vasut722c9682015-07-17 02:07:12 +02003635 }
3636
Marek Vasutc452dcd2015-07-17 02:50:56 +02003637 /* No group failed, continue as usual. */
3638 continue;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003639
Marek Vasutc452dcd2015-07-17 02:50:56 +02003640grp_failed: /* A group failed, increment the counter. */
3641 failing_groups++;
Marek Vasut722c9682015-07-17 02:07:12 +02003642 }
Dinh Nguyen3da42852015-06-02 22:52:49 -05003643
Marek Vasut722c9682015-07-17 02:07:12 +02003644 /*
	3645		 * If there are any failing groups then report
3646 * the failure.
3647 */
3648 if (failing_groups != 0)
3649 return 0;
3650
Marek Vasutc50ae302015-07-17 02:40:21 +02003651 if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO)
3652 continue;
3653
Marek Vasut722c9682015-07-17 02:07:12 +02003654 /* Calibrate the LFIFO */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003655 if (!rw_mgr_mem_calibrate_lfifo(seq))
Marek Vasutc50ae302015-07-17 02:40:21 +02003656 return 0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003657 }
3658
3659 /*
3660 * Do not remove this line as it makes sure all of our decisions
3661 * have been applied.
3662 */
Marek Vasut1273dd92015-07-12 21:05:08 +02003663 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003664 return 1;
3665}
3666
Marek Vasut23a040c2015-07-17 01:20:21 +02003667/**
3668 * run_mem_calibrate() - Perform memory calibration
3669 *
3670 * This function triggers the entire memory calibration procedure.
3671 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003672static int run_mem_calibrate(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003673{
Marek Vasut23a040c2015-07-17 01:20:21 +02003674 int pass;
Marek Vasutbba77112016-04-05 23:41:56 +02003675 u32 ctrl_cfg;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003676
3677 debug("%s:%d\n", __func__, __LINE__);
3678
3679 /* Reset pass/fail status shown on afi_cal_success/fail */
Marek Vasut1273dd92015-07-12 21:05:08 +02003680 writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003681
Marek Vasut23a040c2015-07-17 01:20:21 +02003682 /* Stop tracking manager. */
Marek Vasutbba77112016-04-05 23:41:56 +02003683 ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg);
3684 writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK,
3685 &sdr_ctrl->ctrl_cfg);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003686
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003687 phy_mgr_initialize(seq);
3688 rw_mgr_mem_initialize(seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003689
Marek Vasut23a040c2015-07-17 01:20:21 +02003690 /* Perform the actual memory calibration. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003691 pass = mem_calibrate(seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003692
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003693 mem_precharge_and_activate(seq);
Marek Vasut1273dd92015-07-12 21:05:08 +02003694 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003695
Marek Vasut23a040c2015-07-17 01:20:21 +02003696 /* Handoff. */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003697 rw_mgr_mem_handoff(seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003698 /*
Marek Vasut23a040c2015-07-17 01:20:21 +02003699 * In Hard PHY this is a 2-bit control:
3700 * 0: AFI Mux Select
3701 * 1: DDIO Mux Select
Dinh Nguyen3da42852015-06-02 22:52:49 -05003702 */
Marek Vasut23a040c2015-07-17 01:20:21 +02003703 writel(0x2, &phy_mgr_cfg->mux_sel);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003704
Marek Vasut23a040c2015-07-17 01:20:21 +02003705 /* Start tracking manager. */
Marek Vasutbba77112016-04-05 23:41:56 +02003706 writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg);
Marek Vasut23a040c2015-07-17 01:20:21 +02003707
3708 return pass;
3709}
3710
3711/**
3712 * debug_mem_calibrate() - Report result of memory calibration
3713 * @pass: Value indicating whether calibration passed or failed
3714 *
3715 * This function reports the results of the memory calibration
3716 * and writes debug information into the register file.
3717 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003718static void debug_mem_calibrate(struct socfpga_sdrseq *seq, int pass)
Marek Vasut23a040c2015-07-17 01:20:21 +02003719{
Marek Vasut5ded7322015-08-02 19:42:26 +02003720 u32 debug_info;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003721
3722 if (pass) {
Goldschmidt Simon92962b32018-01-25 06:04:44 +00003723 debug("%s: CALIBRATION PASSED\n", __FILE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003724
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003725 seq->gbl.fom_in /= 2;
3726 seq->gbl.fom_out /= 2;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003727
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003728 if (seq->gbl.fom_in > 0xff)
3729 seq->gbl.fom_in = 0xff;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003730
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003731 if (seq->gbl.fom_out > 0xff)
3732 seq->gbl.fom_out = 0xff;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003733
3734 /* Update the FOM in the register file */
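		/* Packed layout: fom_in in bits [7:0], fom_out in bits [15:8]. */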
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003735 debug_info = seq->gbl.fom_in;
3736 debug_info |= seq->gbl.fom_out << 8;
Marek Vasut1273dd92015-07-12 21:05:08 +02003737 writel(debug_info, &sdr_reg_file->fom);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003738
Marek Vasut1273dd92015-07-12 21:05:08 +02003739 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3740 writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003741 } else {
Goldschmidt Simon92962b32018-01-25 06:04:44 +00003742 debug("%s: CALIBRATION FAILED\n", __FILE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003743
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003744 debug_info = seq->gbl.error_stage;
3745 debug_info |= seq->gbl.error_substage << 8;
3746 debug_info |= seq->gbl.error_group << 16;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003747
Marek Vasut1273dd92015-07-12 21:05:08 +02003748 writel(debug_info, &sdr_reg_file->failing_stage);
3749 writel(debug_info, &phy_mgr_cfg->cal_debug_info);
3750 writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003751
3752 /* Update the failing group/stage in the register file */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003753 debug_info = seq->gbl.error_stage;
3754 debug_info |= seq->gbl.error_substage << 8;
3755 debug_info |= seq->gbl.error_group << 16;
Marek Vasut1273dd92015-07-12 21:05:08 +02003756 writel(debug_info, &sdr_reg_file->failing_stage);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003757 }
3758
Goldschmidt Simon92962b32018-01-25 06:04:44 +00003759 debug("%s: Calibration complete\n", __FILE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003760}
3761
Marek Vasutbb064342015-07-19 06:12:42 +02003762/**
3763 * hc_initialize_rom_data() - Initialize ROM data
3764 *
3765 * Initialize ROM data.
3766 */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003767static void hc_initialize_rom_data(void)
3768{
Marek Vasut04955cf2015-08-02 17:15:19 +02003769 unsigned int nelem = 0;
3770 const u32 *rom_init;
Marek Vasutbb064342015-07-19 06:12:42 +02003771 u32 i, addr;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003772
Marek Vasut04955cf2015-08-02 17:15:19 +02003773 socfpga_get_seq_inst_init(&rom_init, &nelem);
Marek Vasutc4815f72015-07-12 19:03:33 +02003774 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
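	/* The ROM is written one 32-bit word at a time; (i << 2) is the byte offset. */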
Marek Vasut04955cf2015-08-02 17:15:19 +02003775 for (i = 0; i < nelem; i++)
3776 writel(rom_init[i], addr + (i << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05003777
Marek Vasut04955cf2015-08-02 17:15:19 +02003778 socfpga_get_seq_ac_init(&rom_init, &nelem);
Marek Vasutc4815f72015-07-12 19:03:33 +02003779 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
Marek Vasut04955cf2015-08-02 17:15:19 +02003780 for (i = 0; i < nelem; i++)
3781 writel(rom_init[i], addr + (i << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05003782}
3783
Marek Vasut9c1ab2c2015-07-19 06:13:37 +02003784/**
3785 * initialize_reg_file() - Initialize SDR register file
3786 *
3787 * Initialize SDR register file.
3788 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003789static void initialize_reg_file(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003790{
Dinh Nguyen3da42852015-06-02 22:52:49 -05003791 /* Initialize the register file with the correct data */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003792 writel(seq->misccfg->reg_file_init_seq_signature,
3793 &sdr_reg_file->signature);
Marek Vasut1273dd92015-07-12 21:05:08 +02003794 writel(0, &sdr_reg_file->debug_data_addr);
3795 writel(0, &sdr_reg_file->cur_stage);
3796 writel(0, &sdr_reg_file->fom);
3797 writel(0, &sdr_reg_file->failing_stage);
3798 writel(0, &sdr_reg_file->debug1);
3799 writel(0, &sdr_reg_file->debug2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003800}
3801
Marek Vasut2ca151f2015-07-19 06:14:04 +02003802/**
3803 * initialize_hps_phy() - Initialize HPS PHY
3804 *
3805 * Initialize HPS PHY.
3806 */
Dinh Nguyen3da42852015-06-02 22:52:49 -05003807static void initialize_hps_phy(void)
3808{
Marek Vasut5ded7322015-08-02 19:42:26 +02003809 u32 reg;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003810 /*
3811 * Tracking also gets configured here because it's in the
3812 * same register.
3813 */
Marek Vasut5ded7322015-08-02 19:42:26 +02003814 u32 trk_sample_count = 7500;
3815 u32 trk_long_idle_sample_count = (10 << 16) | 100;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003816 /*
3817 * Format is number of outer loops in the 16 MSB, sample
3818 * count in 16 LSB.
3819 */
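	/* Here: 10 outer loops of 100 samples each. */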
3820
3821 reg = 0;
3822 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
3823 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
3824 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
3825 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
3826 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
3827 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
3828 /*
3829 * This field selects the intrinsic latency to RDATA_EN/FULL path.
3830 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
3831 */
3832 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
3833 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
3834 trk_sample_count);
Marek Vasut6cb9f162015-07-12 20:49:39 +02003835 writel(reg, &sdr_ctrl->phy_ctrl0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003836
3837 reg = 0;
3838 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
3839 trk_sample_count >>
3840 SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
3841 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
3842 trk_long_idle_sample_count);
Marek Vasut6cb9f162015-07-12 20:49:39 +02003843 writel(reg, &sdr_ctrl->phy_ctrl1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003844
3845 reg = 0;
3846 reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
3847 trk_long_idle_sample_count >>
3848 SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
Marek Vasut6cb9f162015-07-12 20:49:39 +02003849 writel(reg, &sdr_ctrl->phy_ctrl2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003850}
3851
Marek Vasut880e46f2015-07-17 00:45:11 +02003852/**
3853 * initialize_tracking() - Initialize tracking
3854 *
3855 * Initialize the register file with usable initial data.
3856 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003857static void initialize_tracking(struct socfpga_sdrseq *seq)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003858{
Marek Vasut880e46f2015-07-17 00:45:11 +02003859 /*
3860 * Initialize the register file with the correct data.
3861 * Compute usable version of value in case we skip full
3862 * computation later.
3863 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003864 writel(DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
3865 seq->iocfg->delay_per_dchain_tap) - 1,
Marek Vasut880e46f2015-07-17 00:45:11 +02003866 &sdr_reg_file->dtaps_per_ptap);
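	/*
	 * Worked example (assumed Cyclone V values): with
	 * delay_per_opa_tap = 1666 ps and delay_per_dchain_tap = 8 ps,
	 * DIV_ROUND_UP(1666, 8) - 1 = 209 - 1 = 208 dtaps per ptap.
	 */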
3867
3868 /* trk_sample_count */
3869 writel(7500, &sdr_reg_file->trk_sample_count);
3870
3871 /* longidle outer loop [15:0] */
3872 writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003873
3874 /*
Marek Vasut880e46f2015-07-17 00:45:11 +02003875 * longidle sample count [31:24]
	3876	 * trfc, worst case of 933MHz 4Gb [23:16]
3877 * trcd, worst case [15:8]
3878 * vfifo wait [7:0]
Dinh Nguyen3da42852015-06-02 22:52:49 -05003879 */
Marek Vasut880e46f2015-07-17 00:45:11 +02003880 writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0),
3881 &sdr_reg_file->delays);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003882
Marek Vasut880e46f2015-07-17 00:45:11 +02003883 /* mux delay */
Marek Vasut9a5a90a2019-10-18 00:22:31 +02003884 if (dram_is_ddr(2)) {
3885 writel(0, &sdr_reg_file->trk_rw_mgr_addr);
3886 } else if (dram_is_ddr(3)) {
3887 writel((seq->rwcfg->idle << 24) |
3888 (seq->rwcfg->activate_1 << 16) |
3889 (seq->rwcfg->sgle_read << 8) |
3890 (seq->rwcfg->precharge_all << 0),
3891 &sdr_reg_file->trk_rw_mgr_addr);
3892 }
Dinh Nguyen3da42852015-06-02 22:52:49 -05003893
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003894 writel(seq->rwcfg->mem_if_read_dqs_width,
Marek Vasut880e46f2015-07-17 00:45:11 +02003895 &sdr_reg_file->trk_read_dqs_width);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003896
Marek Vasut880e46f2015-07-17 00:45:11 +02003897 /* trefi [7:0] */
Marek Vasut9a5a90a2019-10-18 00:22:31 +02003898 if (dram_is_ddr(2)) {
3899 writel(1000 << 0, &sdr_reg_file->trk_rfsh);
3900 } else if (dram_is_ddr(3)) {
3901 writel((seq->rwcfg->refresh_all << 24) | (1000 << 0),
3902 &sdr_reg_file->trk_rfsh);
3903 }
Dinh Nguyen3da42852015-06-02 22:52:49 -05003904}
3905
Simon Goldschmidt29873c72019-04-16 22:04:39 +02003906int sdram_calibration_full(struct socfpga_sdr *sdr)
Dinh Nguyen3da42852015-06-02 22:52:49 -05003907{
Marek Vasut5ded7322015-08-02 19:42:26 +02003908 u32 pass;
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003909 struct socfpga_sdrseq seq;
Marek Vasut84e0b0c2015-07-17 01:05:36 +02003910
Simon Goldschmidt29873c72019-04-16 22:04:39 +02003911 /*
3912 * For size reasons, this file uses hard coded addresses.
3913 * Check if we are called with the correct address.
3914 */
3915 if (sdr != (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS)
3916 return -ENODEV;
3917
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003918 memset(&seq, 0, sizeof(seq));
Dinh Nguyen3da42852015-06-02 22:52:49 -05003919
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003920 seq.rwcfg = socfpga_get_sdram_rwmgr_config();
3921 seq.iocfg = socfpga_get_sdram_io_config();
3922 seq.misccfg = socfpga_get_sdram_misc_config();
Marek Vasutd718a262015-08-02 18:12:08 +02003923
Dinh Nguyen3da42852015-06-02 22:52:49 -05003924 /* Set the calibration enabled by default */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003925 seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003926 /*
3927 * Only sweep all groups (regardless of fail state) by default
3928 * Set enabled read test by default.
3929 */
3930#if DISABLE_GUARANTEED_READ
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003931 seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003932#endif
3933 /* Initialize the register file */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003934 initialize_reg_file(&seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003935
3936 /* Initialize any PHY CSR */
3937 initialize_hps_phy();
3938
3939 scc_mgr_initialize();
3940
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003941 initialize_tracking(&seq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003942
Goldschmidt Simon92962b32018-01-25 06:04:44 +00003943 debug("%s: Preparing to start memory calibration\n", __FILE__);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003944
3945 debug("%s:%d\n", __func__, __LINE__);
Marek Vasutea9aa242016-04-04 21:21:05 +02003946 debug_cond(DLEVEL >= 1,
Marek Vasut23f62b32015-07-13 01:05:27 +02003947 "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003948 seq.rwcfg->mem_number_of_ranks,
3949 seq.rwcfg->mem_number_of_cs_per_dimm,
3950 seq.rwcfg->mem_dq_per_read_dqs,
3951 seq.rwcfg->mem_dq_per_write_dqs,
3952 seq.rwcfg->mem_virtual_groups_per_read_dqs,
3953 seq.rwcfg->mem_virtual_groups_per_write_dqs);
Marek Vasutea9aa242016-04-04 21:21:05 +02003954 debug_cond(DLEVEL >= 1,
Marek Vasut23f62b32015-07-13 01:05:27 +02003955 "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003956 seq.rwcfg->mem_if_read_dqs_width,
3957 seq.rwcfg->mem_if_write_dqs_width,
3958 seq.rwcfg->mem_data_width, seq.rwcfg->mem_data_mask_width,
3959 seq.iocfg->delay_per_opa_tap,
3960 seq.iocfg->delay_per_dchain_tap);
Marek Vasutea9aa242016-04-04 21:21:05 +02003961 debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003962 seq.iocfg->delay_per_dqs_en_dchain_tap,
3963 seq.iocfg->dll_chain_length);
Marek Vasutea9aa242016-04-04 21:21:05 +02003964 debug_cond(DLEVEL >= 1,
Marek Vasut139823e2015-08-02 19:47:01 +02003965 "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003966 seq.iocfg->dqs_en_phase_max, seq.iocfg->dqdqs_out_phase_max,
3967 seq.iocfg->dqs_en_delay_max, seq.iocfg->dqs_in_delay_max);
Marek Vasutea9aa242016-04-04 21:21:05 +02003968 debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003969 seq.iocfg->io_in_delay_max, seq.iocfg->io_out1_delay_max,
3970 seq.iocfg->io_out2_delay_max);
Marek Vasutea9aa242016-04-04 21:21:05 +02003971 debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003972 seq.iocfg->dqs_in_reserve, seq.iocfg->dqs_out_reserve);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003973
3974 hc_initialize_rom_data();
3975
3976 /* update info for sims */
3977 reg_file_set_stage(CAL_STAGE_NIL);
3978 reg_file_set_group(0);
3979
3980 /*
3981 * Load global needed for those actions that require
3982 * some dynamic calibration support.
3983 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003984 seq.dyn_calib_steps = STATIC_CALIB_STEPS;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003985 /*
3986 * Load global to allow dynamic selection of delay loop settings
3987 * based on calibration mode.
3988 */
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003989 if (!(seq.dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
3990 seq.skip_delay_mask = 0xff;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003991 else
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003992 seq.skip_delay_mask = 0x0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05003993
Simon Goldschmidt285b3cb2019-07-11 21:18:12 +02003994 pass = run_mem_calibrate(&seq);
3995 debug_mem_calibrate(&seq, pass);
Dinh Nguyen3da42852015-06-02 22:52:49 -05003996 return pass;
3997}
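
/*
 * Usage sketch (an assumption, not code from this file): an SPL caller
 * would typically map the hard-coded SDR block address and treat a
 * non-positive return value as fatal, e.g.:
 *
 *	struct socfpga_sdr *sdr = (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS;
 *
 *	if (sdram_calibration_full(sdr) <= 0)
 *		hang();
 */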