/*
 * Copyright Altera Corporation (C) 2012-2015
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"

static void scc_mgr_load_dqs_for_write_group(uint32_t write_group);

static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
	(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);

static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
	(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);

static struct socfpga_sdr_reg_file *sdr_reg_file =
	(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;

static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
	(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);

static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
	(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;

static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
	(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);

static struct socfpga_data_mgr *data_mgr =
	(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;

static struct socfpga_sdr_ctrl *sdr_ctrl =
	(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;

#define DELTA_D		1

/*
 * In order to reduce ROM size, most of the selectable calibration steps are
 * decided at compile time based on the user's calibration mode selection,
 * as captured by the STATIC_CALIB_STEPS selection below.
 *
 * However, to support simulation-time selection of fast simulation mode, where
 * we skip everything except the bare minimum, we need a few of the steps to
 * be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
 * check, which is based on the rtl-supplied value, or we dynamically compute
 * the value to use based on the dynamically-chosen calibration mode.
 */

#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0

#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
	STATIC_SKIP_DELAY_LOOPS)

/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;

/*
 * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
 * instead of static, we use boolean logic to select between
 * non-skip and skip values.
 *
 * The mask is set to include all bits when not-skipping, but is
 * zero when skipping.
 */

uint16_t skip_delay_mask;	/* mask off bits when skipping/not-skipping */

#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
	((non_skip_value) & skip_delay_mask)
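
/*
 * For example, with skip_delay_mask = 0xffff (not skipping),
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(0x6a) evaluates to 0x6a; with
 * skip_delay_mask = 0x0 (skipping), the same expression evaluates to 0,
 * so each delay loop runs only once.
 */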

struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;

static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
	uint32_t write_group, uint32_t use_dm,
	uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);

static void set_failing_group_stage(uint32_t group, uint32_t stage,
	uint32_t substage)
{
	/*
	 * Only set the global stage if there has not been any other
	 * failing group
	 */
	if (gbl->error_stage == CAL_STAGE_NIL)	{
		gbl->error_substage = substage;
		gbl->error_stage = stage;
		gbl->error_group = group;
	}
}

static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}
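
/*
 * Layout of cur_stage implied by the setters above: bits [31:16] hold the
 * group, bits [15:8] the sub-stage and bits [7:0] the stage. Note that
 * reg_file_set_stage() clears the full low half-word, so it also resets
 * the sub-stage field to zero as a side effect.
 */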

static void initialize(void)
{
	debug("%s:%d\n", __func__, __LINE__);
	/* USER calibration has control over path to memory */
	/*
	 * In Hard PHY this is a 2-bit control:
	 * 0: AFI Mux Select
	 * 1: DDIO Mux Select
	 */
	writel(0x3, &phy_mgr_cfg->mux_sel);

	/* USER memory clock is not stable; we begin initialization */
	writel(0, &phy_mgr_cfg->reset_mem_stbl);

	/* USER calibration status all set to zero */
	writel(0, &phy_mgr_cfg->cal_status);

	writel(0, &phy_mgr_cfg->cal_debug_info);

	if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
		param->read_correct_mask_vg  = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->write_correct_mask_vg = ((uint32_t)1 <<
			(RW_MGR_MEM_DQ_PER_READ_DQS /
			RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
		param->read_correct_mask     = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
		param->write_correct_mask    = ((uint32_t)1 <<
			RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
		param->dm_correct_mask       = ((uint32_t)1 <<
			(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
			- 1;
	}
}
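
/*
 * Example of the mask arithmetic above, assuming typical DDR3 widths of
 * RW_MGR_MEM_DQ_PER_READ_DQS = 8 and
 * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS = 2 (the real values come from
 * sequencer_defines.h and vary per configuration):
 * read_correct_mask_vg = (1 << (8 / 2)) - 1 = 0xf and
 * read_correct_mask = (1 << 8) - 1 = 0xff, i.e. one check bit per DQ pin
 * under test.
 */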

static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
	uint32_t odt_mask_0 = 0;
	uint32_t odt_mask_1 = 0;
	uint32_t cs_and_odt_mask;

	if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
		if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
			/*
			 * 1 Rank
			 * Read: ODT = 0
			 * Write: ODT = 1
			 */
			odt_mask_0 = 0x0;
			odt_mask_1 = 0x1;
		} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
			/* 2 Ranks */
			if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
				/* - Dual-Slot , Single-Rank
				 *   (1 chip-select per DIMM)
				 *   OR
				 * - RDIMM, 4 total CS (2 CS per DIMM)
				 *   means 2 DIMMs
				 * Since MEM_NUMBER_OF_RANKS is 2, they are
				 * both single rank with 2 CS each
				 * (special for RDIMM)
				 * Read: Turn on ODT on the opposite rank
				 * Write: Turn on ODT on all ranks
				 */
				odt_mask_0 = 0x3 & ~(1 << rank);
				odt_mask_1 = 0x3;
			} else {
				/*
				 * USER - Single-Slot , Dual-Rank DIMMs
				 * (2 chip-selects per DIMM)
				 * USER Read: Turn off ODT on all ranks
				 * USER Write: Turn on ODT on active rank
				 */
				odt_mask_0 = 0x0;
				odt_mask_1 = 0x3 & (1 << rank);
			}
		} else {
			/* 4 Ranks
			 * Read:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Read From +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  0  |
			 *     1     |  1  |  0  |  0  |  0  |
			 *     2     |  0  |  0  |  0  |  1  |
			 *     3     |  0  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *
			 * Write:
			 * ----------+-----------------------+
			 *           |          ODT          |
			 * Write To  +-----------------------+
			 *   Rank    |  3  |  2  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 *     0     |  0  |  1  |  0  |  1  |
			 *     1     |  1  |  0  |  1  |  0  |
			 *     2     |  0  |  1  |  0  |  1  |
			 *     3     |  1  |  0  |  1  |  0  |
			 * ----------+-----+-----+-----+-----+
			 */
			switch (rank) {
			case 0:
				odt_mask_0 = 0x4;
				odt_mask_1 = 0x5;
				break;
			case 1:
				odt_mask_0 = 0x8;
				odt_mask_1 = 0xA;
				break;
			case 2:
				odt_mask_0 = 0x1;
				odt_mask_1 = 0x5;
				break;
			case 3:
				odt_mask_0 = 0x2;
				odt_mask_1 = 0xA;
				break;
			}
		}
	} else {
		odt_mask_0 = 0x0;
		odt_mask_1 = 0x0;
	}

	cs_and_odt_mask =
		(0xFF & ~(1 << rank)) |
		((0xFF & odt_mask_0) << 8) |
		((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
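
/*
 * The packed word written above carries the chip-select mask in bits [7:0]
 * (the addressed rank's bit is cleared, CS_N being active low), the read
 * ODT mask in bits [15:8] and the write ODT mask in bits [23:16], as
 * consumed by the RW manager's SET_CS_AND_ODT_MASK register.
 */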

static void scc_mgr_initialize(void)
{
	u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_RFILE_OFFSET;

	/*
	 * Clear register file for HPS
	 * 16 (2^4) is the size of the full register file in the scc mgr:
	 *	RFILE_DEPTH = log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			   MEM_IF_READ_DQS_WIDTH - 1) + 1;
	 */
	uint32_t i;

	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		writel(0, addr + (i << 2));
	}
}

static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group,
					 uint32_t delay)
{
	u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;

	/* Load the setting in the SCC manager */
	writel(delay, addr + (read_group << 2));
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t write_group,
					uint32_t delay)
{
	u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;

	writel(delay, addr + (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
}

static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
	u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_PHASE_OFFSET;

	/* Load the setting in the SCC manager */
	writel(phase, addr + (read_group << 2));
}

static void scc_mgr_set_dqs_en_phase_all_ranks(uint32_t read_group,
					       uint32_t phase)
{
	uint32_t r;
	uint32_t update_scan_chains;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * USER although the h/w doesn't support different phases per
		 * shadow register, for simplicity our scc manager modeling
		 * keeps different phase settings per shadow reg, and it's
		 * important for us to keep them in sync to match h/w.
		 * for efficiency, the scan chain update should occur only
		 * once to sr0.
		 */
		update_scan_chains = (r == 0) ? 1 : 0;

		scc_mgr_set_dqs_en_phase(read_group, phase);

		if (update_scan_chains) {
			writel(read_group, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group,
					   uint32_t phase)
{
	u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQDQS_OUT_PHASE_OFFSET;

	/* Load the setting in the SCC manager */
	writel(phase, addr + (write_group << 2));
}

static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
						     uint32_t phase)
{
	uint32_t r;
	uint32_t update_scan_chains;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * USER although the h/w doesn't support different phases per
		 * shadow register, for simplicity our scc manager modeling
		 * keeps different phase settings per shadow reg, and it's
		 * important for us to keep them in sync to match h/w.
		 * for efficiency, the scan chain update should occur only
		 * once to sr0.
		 */
		update_scan_chains = (r == 0) ? 1 : 0;

		scc_mgr_set_dqdqs_output_phase(write_group, phase);

		if (update_scan_chains) {
			writel(write_group, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}

static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_EN_DELAY_OFFSET;

	/* Load the setting in the SCC manager */
	writel(delay + IO_DQS_EN_DELAY_OFFSET, addr + (read_group << 2));
}

static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
					       uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set_dqs_en_delay(read_group, delay);

		writel(read_group, &sdr_scc_mgr->dqs_ena);
		/*
		 * In shadow register mode, the T11 settings are stored in
		 * registers in the core, which are updated by the DQS_ENA
		 * signals. Not issuing the SCC_MGR_UPD command allows us to
		 * save lots of rank switching overhead, by calling
		 * select_shadow_regs_for_update with update_scan_chains
		 * set to 0.
		 */
		writel(0, &sdr_scc_mgr->update);
	}
	/* Same rationale as in the loop above. */
	writel(0, &sdr_scc_mgr->update);
}

static void scc_mgr_set_oct_out1_delay(uint32_t write_group, uint32_t delay)
{
	uint32_t read_group;
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_OCT_OUT1_DELAY_OFFSET;

	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
		writel(delay, addr + (read_group << 2));
}
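
/*
 * For instance, with hypothetical widths RW_MGR_MEM_IF_READ_DQS_WIDTH = 8
 * and RW_MGR_MEM_IF_WRITE_DQS_WIDTH = 4 (a 2:1 ratio), the loop above maps
 * write group 1 onto read groups 2 and 3, so the OCT delay is written once
 * per constituent read group.
 */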

static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;

	/* Load the setting in the SCC manager */
	writel(delay, addr + (dq_in_group << 2));
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;

	/* Load the setting in the SCC manager */
	writel(delay, addr + (dq_in_group << 2));
}

static void scc_mgr_set_hhp_extras(void)
{
	/*
	 * Load the fixed setting in the SCC manager
	 * bits: 0:0 = 1'b1	- dqs bypass
	 * bits: 1:1 = 1'b1	- dq bypass
	 * bits: 4:2 = 3'b001	- rfifo_mode
	 * bits: 6:5 = 2'b01	- rfifo clock_select
	 * bits: 7:7 = 1'b0	- separate gating from ungating setting
	 * bits: 8:8 = 1'b0	- separate OE from Output delay setting
	 */
	uint32_t value = (0 << 8) | (0 << 7) | (1 << 5) | (1 << 2) |
			 (1 << 1) | (1 << 0);
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_HHP_GLOBALS_OFFSET;

	writel(value, addr + SCC_MGR_HHP_EXTRAS_OFFSET);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t write_group,
				       uint32_t delay)
{
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;

	/* Load the setting in the SCC manager */
	writel(delay, addr + (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
}

static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
	uint32_t addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;

	/* Load the setting in the SCC manager */
	writel(delay, addr +
	       ((RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm) << 2));
}

/*
 * USER Zero all DQS config
 * TODO: maybe rename to scc_mgr_zero_dqs_config (or something)
 */
static void scc_mgr_zero_all(void)
{
	uint32_t i, r;

	/*
	 * USER Zero all DQS config settings, across all groups and all
	 * shadow registers
	 */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			/*
			 * The phases actually don't exist on a per-rank basis,
			 * but there's no harm updating them several times, so
			 * let's keep the code simple.
			 */
			scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
			scc_mgr_set_dqs_en_phase(i, 0);
			scc_mgr_set_dqs_en_delay(i, 0);
		}

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			scc_mgr_set_dqdqs_output_phase(i, 0);
			/* av/cv don't have out2 */
			scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
		}
	}

	/* multicast to all DQS group enables */
	writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
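
/*
 * The SCC update sequence used throughout this file: program the shadow
 * settings first, select the scan chain(s) by writing a group index (or
 * 0xff to multicast) into the relevant *_ena register, then write the
 * update register to scan the new settings out to the I/O cells.
 */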

static void scc_set_bypass_mode(uint32_t write_group, uint32_t mode)
{
	/* mode = 0 : Do NOT bypass - Half Rate Mode */
	/* mode = 1 : Bypass - Full Rate Mode */

	/* only need to set once for all groups, pins, dq, dqs, dm */
	if (write_group == 0) {
		debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n", __func__,
			   __LINE__);
		scc_mgr_set_hhp_extras();
		debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
			   __func__, __LINE__);
	}
	/* multicast to all DQ enables */
	writel(0xff, &sdr_scc_mgr->dq_ena);
	writel(0xff, &sdr_scc_mgr->dm_ena);

	/* update current DQS IO enable */
	writel(0, &sdr_scc_mgr->dqs_io_ena);

	/* update the DQS logic */
	writel(write_group, &sdr_scc_mgr->dqs_ena);

	/* hit update */
	writel(0, &sdr_scc_mgr->update);
}

static void scc_mgr_zero_group(uint32_t write_group, uint32_t test_begin,
			       int32_t out_only)
{
	uint32_t i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}

		/* multicast to all DQ enables */
		writel(0xff, &sdr_scc_mgr->dq_ena);

		/* Zero all DM config settings */
		for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
			scc_mgr_set_dm_out1_delay(i, 0);

		/* multicast to all DM enables */
		writel(0xff, &sdr_scc_mgr->dm_ena);

		/* zero all DQS io settings */
		if (!out_only)
			scc_mgr_set_dqs_io_in_delay(write_group, 0);
		/* av/cv don't have out2 */
		scc_mgr_set_dqs_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
		scc_mgr_load_dqs_for_write_group(write_group);

		/* multicast to all DQS IO enables (only 1) */
		writel(0, &sdr_scc_mgr->dqs_io_ena);

		/* hit update to zero everything */
		writel(0, &sdr_scc_mgr->update);
	}
}

/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
	writel(dqs, &sdr_scc_mgr->dqs_ena);
}

static void scc_mgr_load_dqs_for_write_group(uint32_t write_group)
{
	uint32_t read_group;
	uint32_t addr = (u32)&sdr_scc_mgr->dqs_ena;
	/*
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be scanned multiple times.
	 */
	for (read_group = write_group * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	     read_group < (write_group + 1) * RW_MGR_MEM_IF_READ_DQS_WIDTH /
	     RW_MGR_MEM_IF_WRITE_DQS_WIDTH; ++read_group)
		writel(read_group, addr);
}

/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
	writel(0, &sdr_scc_mgr->dqs_io_ena);
}

/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
	writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}

/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}

/*
 * apply and load a particular input delay for the DQ pins in a group
 * group_bgn is the index of the first dq pin (in the write group)
 */
static void scc_mgr_apply_group_dq_in_delay(uint32_t write_group,
	uint32_t group_bgn, uint32_t delay)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
		scc_mgr_set_dq_in_delay(p, delay);
		scc_mgr_load_dq(p);
	}
}

/* apply and load a particular output delay for the DQ pins in a group */
static void scc_mgr_apply_group_dq_out1_delay(uint32_t write_group,
					      uint32_t group_bgn,
					      uint32_t delay1)
{
	uint32_t i, p;

	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		scc_mgr_set_dq_out1_delay(i, delay1);
		scc_mgr_load_dq(i);
	}
}

/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t write_group,
					      uint32_t delay1)
{
	uint32_t i;

	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		scc_mgr_set_dm_out1_delay(i, delay1);
		scc_mgr_load_dm(i);
	}
}

/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
						    uint32_t delay)
{
	scc_mgr_set_dqs_out1_delay(write_group, delay);
	scc_mgr_load_dqs_io();

	scc_mgr_set_oct_out1_delay(write_group, delay);
	scc_mgr_load_dqs_for_write_group(write_group);
}

/* apply a delay to the entire output side: DQ, DM, DQS, OCT */
static void scc_mgr_apply_group_all_out_delay_add(uint32_t write_group,
						  uint32_t group_bgn,
						  uint32_t delay)
{
	uint32_t i, p, new_delay;

	/* dq shift */
	for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
		new_delay = READ_SCC_DQ_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DQ[%u,%u]: %u > %lu => %lu",
				   __func__, __LINE__,
				   write_group, group_bgn, delay, i, p, new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dq(i);
	}

	/* dm shift */
	for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
		new_delay = READ_SCC_DM_IO_OUT2_DELAY;
		new_delay += delay;

		if (new_delay > IO_IO_OUT2_DELAY_MAX) {
			debug_cond(DLEVEL == 1,
				   "%s:%d (%u, %u, %u) DM[%u]: %u > %lu => %lu\n",
				   __func__, __LINE__,
				   write_group, group_bgn, delay, i, new_delay,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX,
				   (long unsigned int)IO_IO_OUT2_DELAY_MAX);
			new_delay = IO_IO_OUT2_DELAY_MAX;
		}

		scc_mgr_load_dm(i);
	}

	/* dqs shift */
	new_delay = READ_SCC_DQS_IO_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__,
			   write_group, group_bgn, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_dqs_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_io();

	/* oct shift */
	new_delay = READ_SCC_OCT_OUT2_DELAY;
	new_delay += delay;

	if (new_delay > IO_IO_OUT2_DELAY_MAX) {
		debug_cond(DLEVEL == 1,
			   "%s:%d (%u, %u, %u) DQS: %u > %d => %d; adding %u to OUT1\n",
			   __func__, __LINE__,
			   write_group, group_bgn, delay, new_delay,
			   IO_IO_OUT2_DELAY_MAX, IO_IO_OUT2_DELAY_MAX,
			   new_delay - IO_IO_OUT2_DELAY_MAX);
		scc_mgr_set_oct_out1_delay(write_group, new_delay -
					   IO_IO_OUT2_DELAY_MAX);
		new_delay = IO_IO_OUT2_DELAY_MAX;
	}

	scc_mgr_load_dqs_for_write_group(write_group);
}
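
/*
 * Note the OUT1/OUT2 split in the DQS and OCT branches above: OUT2 absorbs
 * the added delay until it saturates at IO_IO_OUT2_DELAY_MAX, and only the
 * remainder (new_delay - IO_IO_OUT2_DELAY_MAX) is pushed into the OUT1
 * stage, keeping the total applied delay as requested.
 */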

/*
 * USER apply a delay to the entire output side (DQ, DM, DQS, OCT)
 * and to all ranks
 */
static void scc_mgr_apply_group_all_out_delay_add_all_ranks(
	uint32_t write_group, uint32_t group_bgn, uint32_t delay)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_apply_group_all_out_delay_add(write_group,
						      group_bgn, delay);
		writel(0, &sdr_scc_mgr->update);
	}
}

/*
 * optimization used to recover some slots in ddr3 inst_rom;
 * could be applied to other protocols if we wanted to
 */
static void set_jump_as_return(void)
{
	/*
	 * To save space, we replace return with a jump to a special shared
	 * RETURN instruction, and we set the counter to a large value so
	 * that we always jump.
	 */
	writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
	writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}

/*
 * should always use constants as an argument to ensure all computations
 * are performed at compile time
 */
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
	uint32_t afi_clocks;
	uint8_t inner = 0;
	uint8_t outer = 0;
	uint16_t c_loop = 0;

	debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);

	/* scale (rounding up) to get afi clocks */
	afi_clocks = (clocks + AFI_RATE_RATIO - 1) / AFI_RATE_RATIO;

	/*
	 * Note, we don't bother accounting for being off a little bit
	 * because of a few extra instructions in outer loops.
	 * Note, the loops have a test at the end, and do the test before
	 * the decrement, and so always perform the loop
	 * 1 time more than the counter value.
	 */
	if (afi_clocks == 0) {
		;
	} else if (afi_clocks <= 0x100) {
		inner = afi_clocks - 1;
		outer = 0;
		c_loop = 0;
	} else if (afi_clocks <= 0x10000) {
		inner = 0xff;
		outer = (afi_clocks - 1) >> 8;
		c_loop = 0;
	} else {
		inner = 0xff;
		outer = 0xff;
		c_loop = (afi_clocks - 1) >> 16;
	}

	/*
	 * rom instructions are structured as follows:
	 *
	 *    IDLE_LOOP2: jnz cntr0, TARGET_A
	 *    IDLE_LOOP1: jnz cntr1, TARGET_B
	 *                return
	 *
	 * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
	 * TARGET_B is set to IDLE_LOOP2 as well.
	 *
	 * if we have no outer loop, though, then we can use IDLE_LOOP1 only,
	 * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely.
	 *
	 * a little confusing, but it helps save precious space in the
	 * inst_rom and sequencer rom and keeps the delays more accurate
	 * and reduces overhead.
	 */
	if (afi_clocks <= 0x100) {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
		       &sdr_rw_load_mgr_regs->load_cntr0);

		writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
		       &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(RW_MGR_IDLE_LOOP2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* hack to get around compiler not being smart enough */
		if (afi_clocks <= 0x10000) {
			/* only need to run once */
			writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
				writel(RW_MGR_IDLE_LOOP2,
				       SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
			} while (c_loop-- != 0);
		}
	}
	debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
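
/*
 * A worked example for delay_for_n_mem_clocks(), assuming AFI_RATE_RATIO = 2
 * (the actual value comes from the generated sequencer headers): a request
 * for 54000 memory clocks scales to afi_clocks = 27000, which selects the
 * nested-loop case with inner = 0xff, outer = (27000 - 1) >> 8 = 0x69 and
 * c_loop = 0. Since each counter runs value + 1 times, the sequencer idles
 * for roughly 256 * (0x69 + 1) = 27136 >= 27000 AFI clocks.
 */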

static void rw_mgr_mem_initialize(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);

	/* The reset / cke part of initialization is broadcast to all ranks */
	writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);

	/*
	 * Here's how you load a register for a loop
	 * Counters are located @ 0x800
	 * Jump addresses are located @ 0xC00
	 * For both, registers 0 to 3 are selected using bits 3 and 2, like
	 * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
	 * I know this ain't pretty, but Avalon bus throws away the 2 least
	 * significant bits
	 */

	/* start with memory RESET activated */

	/* tINIT = 200us */

	/*
	 * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
	 * b = 6A; check: ((2 + 0) * 256 + 2) * 106 = 54484 >= 54000
	 */

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_0_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	/* Execute count instruction */
	writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);

	/* indicate that memory is stable */
	writel(1, &phy_mgr_cfg->reset_mem_stbl);

	/*
	 * transition the RESET to high
	 * Wait for 500us
	 */

	/*
	 * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
	 * If a and b are the number of iterations in 2 nested loops
	 * it takes the following number of cycles to complete the operation
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop
	 * One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
	 * b = FF; check: ((2 + 2) * 131 + 2) * 256 = 134656 >= 134000
	 */

	/* Load counters */
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr0);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr1);
	writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
	       &sdr_rw_load_mgr_regs->load_cntr2);

	/* Load jump address */
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
	writel(RW_MGR_INIT_RESET_1_CKE_0,
	       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

	writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);

	/* bring up clock enable */

	/* tXRP < 250 ck cycles */
	delay_for_n_mem_clocks(250);

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r]) {
			/* request to skip the rank */
			continue;
		}

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/*
		 * USER Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			set_jump_as_return();
			writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
		}
		set_jump_as_return();
		writel(RW_MGR_ZQCL, grpaddr);

		/* tZQinit = tDLLK = 512 ck cycles */
		delay_for_n_mem_clocks(512);
	}
}
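
/*
 * The per-rank sequence above follows the standard DDR3 power-up flow:
 * load MR2, MR3 and MR1, then MR0 with DLL reset, and finally issue ZQCL
 * and wait tZQinit/tDLLK (512 clocks) before the rank is usable.
 */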

/*
 * At the end of calibration we have to program the user settings in, and
 * USER hand off the memory to the user.
 */
static void rw_mgr_mem_handoff(void)
{
	uint32_t r;
	uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			   RW_MGR_RUN_SINGLE_GROUP_OFFSET;

	debug("%s:%d\n", __func__, __LINE__);
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;
		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* precharge all banks ... */
		writel(RW_MGR_PRECHARGE_ALL, grpaddr);

		/* load up MR settings specified by user */

		/*
		 * Use mirrored commands for odd ranks if address
		 * mirroring is on
		 */
		if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
			set_jump_as_return();
			writel(RW_MGR_MRS2_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1_MIRR, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER_MIRR, grpaddr);
		} else {
			set_jump_as_return();
			writel(RW_MGR_MRS2, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS3, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS1, grpaddr);
			delay_for_n_mem_clocks(4);
			set_jump_as_return();
			writel(RW_MGR_MRS0_USER, grpaddr);
		}
		/*
		 * USER need to wait tMOD (12CK or 15ns) time before issuing
		 * other commands, but we will have plenty of NIOS cycles
		 * before actual handoff so it's okay.
		 */
	}
}

/*
 * performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works
 */
static uint32_t rw_mgr_mem_calibrate_read_test_patterns(uint32_t rank_bgn,
	uint32_t group, uint32_t num_tries, uint32_t *bit_chk,
	uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst of read commands */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_GUARANTEED_READ,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_GUARANTEED_READ_CONT,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
			       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
			writel(RW_MGR_GUARANTEED_READ, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
	debug_cond(DLEVEL == 1,
		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %lu\n",
		   __func__, __LINE__, group, *bit_chk,
		   param->read_correct_mask,
		   (long unsigned int)(*bit_chk == param->read_correct_mask));
	return *bit_chk == param->read_correct_mask;
}
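
/*
 * bit_chk accumulates one pass/fail bit per DQ pin: each virtual-group
 * iteration shifts the previous results up and ORs in the per-group bits
 * (a set bit in ~base_rw_mgr means the RW manager reported that bit as
 * correct), and the per-rank result is then ANDed in, so a DQ bit survives
 * only if it passed on every rank and every virtual group.
 */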

static uint32_t rw_mgr_mem_calibrate_read_test_patterns_all_ranks
	(uint32_t group, uint32_t num_tries, uint32_t *bit_chk)
{
	return rw_mgr_mem_calibrate_read_test_patterns(0, group,
		num_tries, bit_chk, 1);
}

/* load up the patterns we are going to use during a read test */
static void rw_mgr_mem_calibrate_read_load_patterns(uint32_t rank_bgn,
	uint32_t all_ranks)
{
	uint32_t r;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);

	debug("%s:%d\n", __func__, __LINE__);
	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		/* Load up a constant burst */
		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}

	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}

/*
 * try a read and see if it returns correct data back. has dummy reads
 * inserted into the mix used to align dqs enable. has more thorough checks
 * than the regular read test.
 */
static uint32_t rw_mgr_mem_calibrate_read_test(uint32_t rank_bgn, uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups, uint32_t all_ranks)
{
	uint32_t r, vg;
	uint32_t correct_mask_vg;
	uint32_t tmp_bit_chk;
	uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
	uint32_t addr;
	uint32_t base_rw_mgr;

	*bit_chk = param->read_correct_mask;
	correct_mask_vg = param->read_correct_mask_vg;

	uint32_t quick_read_mode = (((STATIC_CALIB_STEPS) &
		CALIB_SKIP_DELAY_SWEEPS) && ENABLE_SUPER_QUICK_CALIBRATION);

	for (r = rank_bgn; r < rank_end; r++) {
		if (param->skip_ranks[r])
			/* request to skip the rank */
			continue;

		/* set rank */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);

		writel(RW_MGR_READ_B2B_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
		writel(RW_MGR_READ_B2B_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add2);

		if (quick_read_mode)
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
		/* need at least two (1+1) reads to capture failures */
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);

		writel(RW_MGR_READ_B2B,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add3);

		tmp_bit_chk = 0;
		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; ; vg--) {
			/* reset the fifos to get pointers to known state */
			writel(0, &phy_mgr_cmd->fifo_reset);
			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RESET_READ_DATAPATH_OFFSET);

			tmp_bit_chk = tmp_bit_chk << (RW_MGR_MEM_DQ_PER_READ_DQS
				/ RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS);

			if (all_groups)
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
			else
				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;

			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
				 vg) << 2));

			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
			tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~base_rw_mgr);

			if (vg == 0)
				break;
		}
		*bit_chk &= tmp_bit_chk;
	}

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));

	if (all_correct) {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask,
			   (long unsigned int)(*bit_chk ==
					       param->read_correct_mask));
		return *bit_chk == param->read_correct_mask;
	} else {
		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
		debug_cond(DLEVEL == 2,
			   "%s:%d read_test(%u,ONE,%u) => (%u != %lu) => %lu\n",
			   __func__, __LINE__, group, all_groups, *bit_chk,
			   (long unsigned int)0,
			   (long unsigned int)(*bit_chk != 0x00));
		return *bit_chk != 0x00;
	}
}

static uint32_t rw_mgr_mem_calibrate_read_test_all_ranks(uint32_t group,
	uint32_t num_tries, uint32_t all_correct, uint32_t *bit_chk,
	uint32_t all_groups)
{
	return rw_mgr_mem_calibrate_read_test(0, group, num_tries, all_correct,
					      bit_chk, all_groups, 1);
}

static void rw_mgr_incr_vfifo(uint32_t grp, uint32_t *v)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
	(*v)++;
}

static void rw_mgr_decr_vfifo(uint32_t grp, uint32_t *v)
{
	uint32_t i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp, v);
}
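
/*
 * The VFIFO pointer only moves forward, so a decrement is implemented as
 * VFIFO_SIZE - 1 increments: stepping all the way around the FIFO is
 * equivalent to stepping back by one position, modulo VFIFO_SIZE.
 */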

static int find_vfifo_read(uint32_t grp, uint32_t *bit_chk)
{
	uint32_t v;
	uint32_t fail_cnt = 0;
	uint32_t test_status;

	for (v = 0; v < VFIFO_SIZE; ) {
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo %u\n",
			   __func__, __LINE__, v);
		test_status = rw_mgr_mem_calibrate_read_test_all_ranks
			(grp, 1, PASS_ONE_BIT, bit_chk, 0);
		if (!test_status) {
			fail_cnt++;

			if (fail_cnt == 2)
				break;
		}

		/* fiddle with FIFO */
		rw_mgr_incr_vfifo(grp, &v);
	}

	if (v >= VFIFO_SIZE) {
		/* no failing read found!! Something must have gone wrong */
		debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: vfifo failed\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return v;
	}
}

static int find_working_phase(uint32_t *grp, uint32_t *bit_chk,
			      uint32_t dtaps_per_ptap, uint32_t *work_bgn,
			      uint32_t *v, uint32_t *d, uint32_t *p,
			      uint32_t *i, uint32_t *max_working_cnt)
{
	uint32_t found_begin = 0;
	uint32_t tmp_delay = 0;
	uint32_t test_status;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++, tmp_delay +=
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
		*work_bgn = tmp_delay;
		scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);

		for (*i = 0; *i < VFIFO_SIZE; (*i)++) {
			for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX; (*p)++,
			     *work_bgn += IO_DELAY_PER_OPA_TAP) {
				scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);

				test_status =
					rw_mgr_mem_calibrate_read_test_all_ranks
					(*grp, 1, PASS_ONE_BIT, bit_chk, 0);

				if (test_status) {
					*max_working_cnt = 1;
					found_begin = 1;
					break;
				}
			}

			if (found_begin)
				break;

			if (*p > IO_DQS_EN_PHASE_MAX)
				/* fiddle with FIFO */
				rw_mgr_incr_vfifo(*grp, v);
		}

		if (found_begin)
			break;
	}

	if (*i >= VFIFO_SIZE) {
		/* cannot find working solution */
		debug_cond(DLEVEL == 2,
			   "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
			   __func__, __LINE__);
		return 0;
	} else {
		return 1;
	}
}
1408
1409static void sdr_backup_phase(uint32_t *grp, uint32_t *bit_chk,
1410 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1411 uint32_t *p, uint32_t *max_working_cnt)
1412{
1413 uint32_t found_begin = 0;
1414 uint32_t tmp_delay;
1415
1416 /* Special case code for backing up a phase */
1417 if (*p == 0) {
1418 *p = IO_DQS_EN_PHASE_MAX;
1419 rw_mgr_decr_vfifo(*grp, v);
1420 } else {
1421 (*p)--;
1422 }
1423 tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
1424 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
1425
1426 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn;
1427 (*d)++, tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1428 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
1429
1430 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
1431 PASS_ONE_BIT,
1432 bit_chk, 0)) {
1433 found_begin = 1;
1434 *work_bgn = tmp_delay;
1435 break;
1436 }
1437 }
1438
1439 /* We have found a working dtap before the ptap found above */
1440 if (found_begin == 1)
1441 (*max_working_cnt)++;
1442
1443 /*
1444 * Restore VFIFO to old state before we decremented it
1445 * (if needed).
1446 */
1447 (*p)++;
1448 if (*p > IO_DQS_EN_PHASE_MAX) {
1449 *p = 0;
1450 rw_mgr_incr_vfifo(*grp, v);
1451 }
1452
1453 scc_mgr_set_dqs_en_delay_all_ranks(*grp, 0);
1454}
1455
1456static int sdr_nonworking_phase(uint32_t *grp, uint32_t *bit_chk,
1457 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1458 uint32_t *p, uint32_t *i, uint32_t *max_working_cnt,
1459 uint32_t *work_end)
1460{
1461 uint32_t found_end = 0;
1462
1463 (*p)++;
1464 *work_end += IO_DELAY_PER_OPA_TAP;
1465 if (*p > IO_DQS_EN_PHASE_MAX) {
1466 /* fiddle with FIFO */
1467 *p = 0;
1468 rw_mgr_incr_vfifo(*grp, v);
1469 }
1470
1471 for (; *i < VFIFO_SIZE + 1; (*i)++) {
1472 for (; *p <= IO_DQS_EN_PHASE_MAX; (*p)++, *work_end
1473 += IO_DELAY_PER_OPA_TAP) {
1474 scc_mgr_set_dqs_en_phase_all_ranks(*grp, *p);
1475
1476 if (!rw_mgr_mem_calibrate_read_test_all_ranks
1477 (*grp, 1, PASS_ONE_BIT, bit_chk, 0)) {
1478 found_end = 1;
1479 break;
1480 } else {
1481 (*max_working_cnt)++;
1482 }
1483 }
1484
1485 if (found_end)
1486 break;
1487
1488 if (*p > IO_DQS_EN_PHASE_MAX) {
1489 /* fiddle with FIFO */
1490 rw_mgr_incr_vfifo(*grp, v);
1491 *p = 0;
1492 }
1493 }
1494
1495 if (*i >= VFIFO_SIZE + 1) {
1496 /* cannot see edge of failing read */
1497 debug_cond(DLEVEL == 2, "%s:%d sdr_nonworking_phase: end:\
1498 failed\n", __func__, __LINE__);
1499 return 0;
1500 } else {
1501 return 1;
1502 }
1503}
1504
1505static int sdr_find_window_centre(uint32_t *grp, uint32_t *bit_chk,
1506 uint32_t *work_bgn, uint32_t *v, uint32_t *d,
1507 uint32_t *p, uint32_t *work_mid,
1508 uint32_t *work_end)
1509{
1510 int i;
1511 int tmp_delay = 0;
1512
1513 *work_mid = (*work_bgn + *work_end) / 2;
1514
1515 debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
1516 *work_bgn, *work_end, *work_mid);
1517 /* Get the middle delay to be less than a VFIFO delay */
1518 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX;
1519 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
1520 ;
1521 debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
1522 while (*work_mid > tmp_delay)
1523 *work_mid -= tmp_delay;
1524 debug_cond(DLEVEL == 2, "new work_mid %d\n", *work_mid);
1525
1526 tmp_delay = 0;
1527 for (*p = 0; *p <= IO_DQS_EN_PHASE_MAX && tmp_delay < *work_mid;
1528 (*p)++, tmp_delay += IO_DELAY_PER_OPA_TAP)
1529 ;
1530 tmp_delay -= IO_DELAY_PER_OPA_TAP;
1531 debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", (*p) - 1, tmp_delay);
1532 for (*d = 0; *d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_mid; (*d)++,
1533 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP)
1534 ;
1535 debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", *d, tmp_delay);
1536
1537 scc_mgr_set_dqs_en_phase_all_ranks(*grp, (*p) - 1);
1538 scc_mgr_set_dqs_en_delay_all_ranks(*grp, *d);
1539
1540 /*
 1541	 * Push the VFIFO until we can successfully calibrate. We can do this
 1542	 * because the largest possible margin is one VFIFO cycle.
1543 */
1544 for (i = 0; i < VFIFO_SIZE; i++) {
1545 debug_cond(DLEVEL == 2, "find_dqs_en_phase: center: vfifo=%u\n",
1546 *v);
1547 if (rw_mgr_mem_calibrate_read_test_all_ranks(*grp, 1,
1548 PASS_ONE_BIT,
1549 bit_chk, 0)) {
1550 break;
1551 }
1552
1553 /* fiddle with FIFO */
1554 rw_mgr_incr_vfifo(*grp, v);
1555 }
1556
1557 if (i >= VFIFO_SIZE) {
1558 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center: \
1559 failed\n", __func__, __LINE__);
1560 return 0;
1561 } else {
1562 return 1;
1563 }
1564}
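
/*
 * The tap decomposition above amounts to: pick the smallest ptap count
 * whose delay covers work_mid, back off one phase, then cover the
 * remainder with dtaps. A closed-form sketch (decompose_mid() is a
 * hypothetical helper; it assumes mid > 0 and ignores the PHASE/DELAY
 * MAX clamps applied above):
 */
static inline void decompose_mid(uint32_t mid, uint32_t *p, uint32_t *d)
{
	/* coarse: p = ceil(mid / IO_DELAY_PER_OPA_TAP); phase used is p - 1 */
	*p = (mid + IO_DELAY_PER_OPA_TAP - 1) / IO_DELAY_PER_OPA_TAP;
	/* fine: dtaps covering what the (p - 1) phase taps leave over */
	*d = (mid - (*p - 1) * IO_DELAY_PER_OPA_TAP +
	      IO_DELAY_PER_DQS_EN_DCHAIN_TAP - 1) /
	     IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
}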
1565
1566/* find a good dqs enable to use */
1567static uint32_t rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(uint32_t grp)
1568{
1569 uint32_t v, d, p, i;
1570 uint32_t max_working_cnt;
1571 uint32_t bit_chk;
1572 uint32_t dtaps_per_ptap;
1573 uint32_t work_bgn, work_mid, work_end;
1574 uint32_t found_passing_read, found_failing_read, initial_failing_dtap;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001575
1576 debug("%s:%d %u\n", __func__, __LINE__, grp);
1577
1578 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
1579
1580 scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
1581 scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
1582
1583 /* ************************************************************** */
1584 /* * Step 0 : Determine number of delay taps for each phase tap * */
1585 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP/IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1586
1587 /* ********************************************************* */
1588 /* * Step 1 : First push vfifo until we get a failing read * */
1589 v = find_vfifo_read(grp, &bit_chk);
1590
1591 max_working_cnt = 0;
1592
1593 /* ******************************************************** */
1594 /* * step 2: find first working phase, increment in ptaps * */
1595 work_bgn = 0;
1596 if (find_working_phase(&grp, &bit_chk, dtaps_per_ptap, &work_bgn, &v, &d,
1597 &p, &i, &max_working_cnt) == 0)
1598 return 0;
1599
1600 work_end = work_bgn;
1601
1602 /*
 1603	 * If d is 0 then the working window covers a phase tap and we can
 1604	 * follow the old procedure; otherwise, we've found the beginning,
 1605	 * and we need to increment the dtaps until we find the end.
1606 */
1607 if (d == 0) {
1608 /* ********************************************************* */
1609 /* * step 3a: if we have room, back off by one and
1610 increment in dtaps * */
1611
1612 sdr_backup_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1613 &max_working_cnt);
1614
1615 /* ********************************************************* */
1616 /* * step 4a: go forward from working phase to non working
1617 phase, increment in ptaps * */
1618 if (sdr_nonworking_phase(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1619 &i, &max_working_cnt, &work_end) == 0)
1620 return 0;
1621
1622 /* ********************************************************* */
1623 /* * step 5a: back off one from last, increment in dtaps * */
1624
1625 /* Special case code for backing up a phase */
1626 if (p == 0) {
1627 p = IO_DQS_EN_PHASE_MAX;
1628 rw_mgr_decr_vfifo(grp, &v);
1629 } else {
1630 p = p - 1;
1631 }
1632
1633 work_end -= IO_DELAY_PER_OPA_TAP;
1634 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1635
1636 /* * The actual increment of dtaps is done outside of
1637 the if/else loop to share code */
1638 d = 0;
1639
1640 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p: \
1641 vfifo=%u ptap=%u\n", __func__, __LINE__,
1642 v, p);
1643 } else {
1644 /* ******************************************************* */
1645 /* * step 3-5b: Find the right edge of the window using
1646 delay taps * */
1647 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase:vfifo=%u \
1648 ptap=%u dtap=%u bgn=%u\n", __func__, __LINE__,
1649 v, p, d, work_bgn);
1650
1651 work_end = work_bgn;
1652
1653 /* * The actual increment of dtaps is done outside of the
1654 if/else loop to share code */
1655
1656 /* Only here to counterbalance a subtract later on which is
1657 not needed if this branch of the algorithm is taken */
1658 max_working_cnt++;
1659 }
1660
1661 /* The dtap increment to find the failing edge is done here */
1662 for (; d <= IO_DQS_EN_DELAY_MAX; d++, work_end +=
1663 IO_DELAY_PER_DQS_EN_DCHAIN_TAP) {
1664 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
1665 end-2: dtap=%u\n", __func__, __LINE__, d);
1666 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1667
1668 if (!rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1669 PASS_ONE_BIT,
1670 &bit_chk, 0)) {
1671 break;
1672 }
1673 }
1674
1675 /* Go back to working dtap */
1676 if (d != 0)
1677 work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
1678
1679 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: v/p/d: vfifo=%u \
1680 ptap=%u dtap=%u end=%u\n", __func__, __LINE__,
1681 v, p, d-1, work_end);
1682
1683 if (work_end < work_bgn) {
1684 /* nil range */
1685 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: end-2: \
1686 failed\n", __func__, __LINE__);
1687 return 0;
1688 }
1689
1690 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: found range [%u,%u]\n",
1691 __func__, __LINE__, work_bgn, work_end);
1692
1693 /* *************************************************************** */
1694 /*
1695 * * We need to calculate the number of dtaps that equal a ptap
1696 * * To do that we'll back up a ptap and re-find the edge of the
1697 * * window using dtaps
1698 */
1699
1700 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: calculate dtaps_per_ptap \
1701 for tracking\n", __func__, __LINE__);
1702
1703 /* Special case code for backing up a phase */
1704 if (p == 0) {
1705 p = IO_DQS_EN_PHASE_MAX;
1706 rw_mgr_decr_vfifo(grp, &v);
1707 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
1708 cycle/phase: v=%u p=%u\n", __func__, __LINE__,
1709 v, p);
1710 } else {
1711 p = p - 1;
1712 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: backedup \
1713 phase only: v=%u p=%u", __func__, __LINE__,
1714 v, p);
1715 }
1716
1717 scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
1718
1719 /*
1720 * Increase dtap until we first see a passing read (in case the
1721 * window is smaller than a ptap),
1722 * and then a failing read to mark the edge of the window again
1723 */
1724
1725 /* Find a passing read */
1726 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find passing read\n",
1727 __func__, __LINE__);
1728 found_passing_read = 0;
1729 found_failing_read = 0;
1730 initial_failing_dtap = d;
1731 for (; d <= IO_DQS_EN_DELAY_MAX; d++) {
1732 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: testing \
1733 read d=%u\n", __func__, __LINE__, d);
1734 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1735
1736 if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
1737 PASS_ONE_BIT,
1738 &bit_chk, 0)) {
1739 found_passing_read = 1;
1740 break;
1741 }
1742 }
1743
1744 if (found_passing_read) {
1745 /* Find a failing read */
1746 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: find failing \
1747 read\n", __func__, __LINE__);
1748 for (d = d + 1; d <= IO_DQS_EN_DELAY_MAX; d++) {
1749 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: \
1750 testing read d=%u\n", __func__, __LINE__, d);
1751 scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
1752
1753 if (!rw_mgr_mem_calibrate_read_test_all_ranks
1754 (grp, 1, PASS_ONE_BIT, &bit_chk, 0)) {
1755 found_failing_read = 1;
1756 break;
1757 }
1758 }
1759 } else {
1760 debug_cond(DLEVEL == 1, "%s:%d find_dqs_en_phase: failed to \
1761 calculate dtaps", __func__, __LINE__);
1762 debug_cond(DLEVEL == 1, "per ptap. Fall back on static value\n");
1763 }
1764
1765 /*
1766 * The dynamically calculated dtaps_per_ptap is only valid if we
1767 * found a passing/failing read. If we didn't, it means d hit the max
1768 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
1769 * statically calculated value.
1770 */
1771 if (found_passing_read && found_failing_read)
1772 dtaps_per_ptap = d - initial_failing_dtap;
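	/*
	 * Worked example (illustrative numbers only): if the edge re-found
	 * with dtaps fails at d = 14 and the first failing dtap was
	 * initial_failing_dtap = 2, then dtaps_per_ptap = 14 - 2 = 12,
	 * i.e. one phase tap spans roughly twelve delay-chain taps.
	 */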
1773
Marek Vasut1273dd92015-07-12 21:05:08 +02001774 writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001775 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: dtaps_per_ptap=%u \
1776 - %u = %u", __func__, __LINE__, d,
1777 initial_failing_dtap, dtaps_per_ptap);
1778
1779 /* ******************************************** */
1780 /* * step 6: Find the centre of the window * */
1781 if (sdr_find_window_centre(&grp, &bit_chk, &work_bgn, &v, &d, &p,
1782 &work_mid, &work_end) == 0)
1783 return 0;
1784
1785 debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: center found: \
1786 vfifo=%u ptap=%u dtap=%u\n", __func__, __LINE__,
1787 v, p-1, d);
1788 return 1;
1789}
1790
1791/*
1792 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
1793 * dq_in_delay values
1794 */
1795static uint32_t
1796rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
1797(uint32_t write_group, uint32_t read_group, uint32_t test_bgn)
1798{
1799 uint32_t found;
1800 uint32_t i;
1801 uint32_t p;
1802 uint32_t d;
1803 uint32_t r;
Dinh Nguyen3da42852015-06-02 22:52:49 -05001804
1805 const uint32_t delay_step = IO_IO_IN_DELAY_MAX /
1806 (RW_MGR_MEM_DQ_PER_READ_DQS-1);
 1807	 /* we start at zero, so have one less dq to divide among */
1808
1809 debug("%s:%d (%u,%u,%u)", __func__, __LINE__, write_group, read_group,
1810 test_bgn);
1811
1812 /* try different dq_in_delays since the dq path is shorter than dqs */
1813
1814 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
1815 r += NUM_RANKS_PER_SHADOW_REG) {
1816 for (i = 0, p = test_bgn, d = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS;
1817 i++, p++, d += delay_step) {
1818 debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_\
1819 vfifo_find_dqs_", __func__, __LINE__);
1820 debug_cond(DLEVEL == 1, "en_phase_sweep_dq_in_delay: g=%u/%u ",
1821 write_group, read_group);
1822 debug_cond(DLEVEL == 1, "r=%u, i=%u p=%u d=%u\n", r, i , p, d);
Marek Vasut07aee5b2015-07-12 22:07:33 +02001823 scc_mgr_set_dq_in_delay(p, d);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001824 scc_mgr_load_dq(p);
1825 }
Marek Vasut1273dd92015-07-12 21:05:08 +02001826 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001827 }
1828
1829 found = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(read_group);
1830
1831 debug_cond(DLEVEL == 1, "%s:%d rw_mgr_mem_calibrate_vfifo_find_dqs_\
1832 en_phase_sweep_dq", __func__, __LINE__);
 1833	 debug_cond(DLEVEL == 1, "_in_delay: g=%u/%u found=%u; Resetting delay \
1834 chain to zero\n", write_group, read_group, found);
1835
1836 for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
1837 r += NUM_RANKS_PER_SHADOW_REG) {
1838 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS;
1839 i++, p++) {
Marek Vasut07aee5b2015-07-12 22:07:33 +02001840 scc_mgr_set_dq_in_delay(p, 0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001841 scc_mgr_load_dq(p);
1842 }
Marek Vasut1273dd92015-07-12 21:05:08 +02001843 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001844 }
1845
1846 return found;
1847}
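
/*
 * The sweep above spreads dq_in_delay linearly across the pins of the
 * read group: pin i is given i * delay_step, so the first pin sits at 0
 * and the last close to IO_IO_IN_DELAY_MAX. A minimal sketch
 * (sweep_delay_for_pin() is a hypothetical helper, not part of this
 * sequencer):
 */
static inline uint32_t sweep_delay_for_pin(uint32_t i)
{
	const uint32_t step = IO_IO_IN_DELAY_MAX /
			      (RW_MGR_MEM_DQ_PER_READ_DQS - 1);

	return i * step;	/* i in 0 .. RW_MGR_MEM_DQ_PER_READ_DQS - 1 */
}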
1848
1849/* per-bit deskew DQ and center */
1850static uint32_t rw_mgr_mem_calibrate_vfifo_center(uint32_t rank_bgn,
1851 uint32_t write_group, uint32_t read_group, uint32_t test_bgn,
1852 uint32_t use_read_test, uint32_t update_fom)
1853{
1854 uint32_t i, p, d, min_index;
1855 /*
1856 * Store these as signed since there are comparisons with
1857 * signed numbers.
1858 */
1859 uint32_t bit_chk;
1860 uint32_t sticky_bit_chk;
1861 int32_t left_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1862 int32_t right_edge[RW_MGR_MEM_DQ_PER_READ_DQS];
1863 int32_t final_dq[RW_MGR_MEM_DQ_PER_READ_DQS];
1864 int32_t mid;
1865 int32_t orig_mid_min, mid_min;
1866 int32_t new_dqs, start_dqs, start_dqs_en, shift_dq, final_dqs,
1867 final_dqs_en;
1868 int32_t dq_margin, dqs_margin;
1869 uint32_t stop;
1870 uint32_t temp_dq_in_delay1, temp_dq_in_delay2;
1871 uint32_t addr;
1872
1873 debug("%s:%d: %u %u", __func__, __LINE__, read_group, test_bgn);
1874
Marek Vasutc4815f72015-07-12 19:03:33 +02001875 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_DQS_IN_DELAY_OFFSET;
Marek Vasut17fdc912015-07-12 20:05:54 +02001876 start_dqs = readl(addr + (read_group << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05001877 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
Marek Vasut17fdc912015-07-12 20:05:54 +02001878 start_dqs_en = readl(addr + ((read_group << 2)
Dinh Nguyen3da42852015-06-02 22:52:49 -05001879 - IO_DQS_EN_DELAY_OFFSET));
1880
1881 /* set the left and right edge of each bit to an illegal value */
1882 /* use (IO_IO_IN_DELAY_MAX + 1) as an illegal value */
1883 sticky_bit_chk = 0;
1884 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1885 left_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1886 right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1887 }
1888
Dinh Nguyen3da42852015-06-02 22:52:49 -05001889 /* Search for the left edge of the window for each bit */
1890 for (d = 0; d <= IO_IO_IN_DELAY_MAX; d++) {
1891 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, d);
1892
Marek Vasut1273dd92015-07-12 21:05:08 +02001893 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001894
1895 /*
1896 * Stop searching when the read test doesn't pass AND when
1897 * we've seen a passing read on every bit.
1898 */
1899 if (use_read_test) {
1900 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1901 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1902 &bit_chk, 0, 0);
1903 } else {
1904 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
1905 0, PASS_ONE_BIT,
1906 &bit_chk, 0);
1907 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
1908 (read_group - (write_group *
1909 RW_MGR_MEM_IF_READ_DQS_WIDTH /
1910 RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
1911 stop = (bit_chk == 0);
1912 }
1913 sticky_bit_chk = sticky_bit_chk | bit_chk;
1914 stop = stop && (sticky_bit_chk == param->read_correct_mask);
1915 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(left): dtap=%u => %u == %u \
1916 && %u", __func__, __LINE__, d,
1917 sticky_bit_chk,
1918 param->read_correct_mask, stop);
1919
1920 if (stop == 1) {
1921 break;
1922 } else {
1923 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
1924 if (bit_chk & 1) {
1925 /* Remember a passing test as the
1926 left_edge */
1927 left_edge[i] = d;
1928 } else {
1929 /* If a left edge has not been seen yet,
1930 then a future passing test will mark
1931 this edge as the right edge */
1932 if (left_edge[i] ==
1933 IO_IO_IN_DELAY_MAX + 1) {
1934 right_edge[i] = -(d + 1);
1935 }
1936 }
1937 bit_chk = bit_chk >> 1;
1938 }
1939 }
1940 }
1941
1942 /* Reset DQ delay chains to 0 */
1943 scc_mgr_apply_group_dq_in_delay(write_group, test_bgn, 0);
1944 sticky_bit_chk = 0;
1945 for (i = RW_MGR_MEM_DQ_PER_READ_DQS - 1;; i--) {
1946 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
1947 %d right_edge[%u]: %d\n", __func__, __LINE__,
1948 i, left_edge[i], i, right_edge[i]);
1949
1950 /*
1951 * Check for cases where we haven't found the left edge,
 1952	 * which makes our assignment of the right edge invalid.
1953 * Reset it to the illegal value.
1954 */
1955 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) && (
1956 right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1957 right_edge[i] = IO_IO_IN_DELAY_MAX + 1;
1958 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: reset \
1959 right_edge[%u]: %d\n", __func__, __LINE__,
1960 i, right_edge[i]);
1961 }
1962
1963 /*
1964 * Reset sticky bit (except for bits where we have seen
1965 * both the left and right edge).
1966 */
1967 sticky_bit_chk = sticky_bit_chk << 1;
1968 if ((left_edge[i] != IO_IO_IN_DELAY_MAX + 1) &&
1969 (right_edge[i] != IO_IO_IN_DELAY_MAX + 1)) {
1970 sticky_bit_chk = sticky_bit_chk | 1;
1971 }
1972
1973 if (i == 0)
1974 break;
1975 }
1976
Dinh Nguyen3da42852015-06-02 22:52:49 -05001977 /* Search for the right edge of the window for each bit */
1978 for (d = 0; d <= IO_DQS_IN_DELAY_MAX - start_dqs; d++) {
1979 scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
1980 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
1981 uint32_t delay = d + start_dqs_en;
1982 if (delay > IO_DQS_EN_DELAY_MAX)
1983 delay = IO_DQS_EN_DELAY_MAX;
1984 scc_mgr_set_dqs_en_delay(read_group, delay);
1985 }
1986 scc_mgr_load_dqs(read_group);
1987
Marek Vasut1273dd92015-07-12 21:05:08 +02001988 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05001989
1990 /*
1991 * Stop searching when the read test doesn't pass AND when
1992 * we've seen a passing read on every bit.
1993 */
1994 if (use_read_test) {
1995 stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
1996 read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
1997 &bit_chk, 0, 0);
1998 } else {
1999 rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2000 0, PASS_ONE_BIT,
2001 &bit_chk, 0);
2002 bit_chk = bit_chk >> (RW_MGR_MEM_DQ_PER_READ_DQS *
2003 (read_group - (write_group *
2004 RW_MGR_MEM_IF_READ_DQS_WIDTH /
2005 RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
2006 stop = (bit_chk == 0);
2007 }
2008 sticky_bit_chk = sticky_bit_chk | bit_chk;
2009 stop = stop && (sticky_bit_chk == param->read_correct_mask);
2010
2011 debug_cond(DLEVEL == 2, "%s:%d vfifo_center(right): dtap=%u => %u == \
2012 %u && %u", __func__, __LINE__, d,
2013 sticky_bit_chk, param->read_correct_mask, stop);
2014
2015 if (stop == 1) {
2016 break;
2017 } else {
2018 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2019 if (bit_chk & 1) {
2020 /* Remember a passing test as
2021 the right_edge */
2022 right_edge[i] = d;
2023 } else {
2024 if (d != 0) {
2025 /* If a right edge has not been
2026 seen yet, then a future passing
2027 test will mark this edge as the
2028 left edge */
2029 if (right_edge[i] ==
2030 IO_IO_IN_DELAY_MAX + 1) {
2031 left_edge[i] = -(d + 1);
2032 }
2033 } else {
2034 /* d = 0 failed, but it passed
2035 when testing the left edge,
2036 so it must be marginal,
2037 set it to -1 */
2038 if (right_edge[i] ==
2039 IO_IO_IN_DELAY_MAX + 1 &&
2040 left_edge[i] !=
2041 IO_IO_IN_DELAY_MAX
2042 + 1) {
2043 right_edge[i] = -1;
2044 }
2045 /* If a right edge has not been
2046 seen yet, then a future passing
2047 test will mark this edge as the
2048 left edge */
2049 else if (right_edge[i] ==
2050 IO_IO_IN_DELAY_MAX +
2051 1) {
2052 left_edge[i] = -(d + 1);
2053 }
2054 }
2055 }
2056
2057 debug_cond(DLEVEL == 2, "%s:%d vfifo_center[r,\
2058 d=%u]: ", __func__, __LINE__, d);
2059 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d ",
2060 (int)(bit_chk & 1), i, left_edge[i]);
2061 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2062 right_edge[i]);
2063 bit_chk = bit_chk >> 1;
2064 }
2065 }
2066 }
2067
2068 /* Check that all bits have a window */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002069 for (i = 0; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2070 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: left_edge[%u]: \
2071 %d right_edge[%u]: %d", __func__, __LINE__,
2072 i, left_edge[i], i, right_edge[i]);
2073 if ((left_edge[i] == IO_IO_IN_DELAY_MAX + 1) || (right_edge[i]
2074 == IO_IO_IN_DELAY_MAX + 1)) {
2075 /*
2076 * Restore delay chain settings before letting the loop
2077 * in rw_mgr_mem_calibrate_vfifo to retry different
2078 * dqs/ck relationships.
2079 */
2080 scc_mgr_set_dqs_bus_in_delay(read_group, start_dqs);
2081 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2082 scc_mgr_set_dqs_en_delay(read_group,
2083 start_dqs_en);
2084 }
2085 scc_mgr_load_dqs(read_group);
Marek Vasut1273dd92015-07-12 21:05:08 +02002086 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002087
2088 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: failed to \
2089 find edge [%u]: %d %d", __func__, __LINE__,
2090 i, left_edge[i], right_edge[i]);
2091 if (use_read_test) {
2092 set_failing_group_stage(read_group *
2093 RW_MGR_MEM_DQ_PER_READ_DQS + i,
2094 CAL_STAGE_VFIFO,
2095 CAL_SUBSTAGE_VFIFO_CENTER);
2096 } else {
2097 set_failing_group_stage(read_group *
2098 RW_MGR_MEM_DQ_PER_READ_DQS + i,
2099 CAL_STAGE_VFIFO_AFTER_WRITES,
2100 CAL_SUBSTAGE_VFIFO_CENTER);
2101 }
2102 return 0;
2103 }
2104 }
2105
2106 /* Find middle of window for each DQ bit */
2107 mid_min = left_edge[0] - right_edge[0];
2108 min_index = 0;
2109 for (i = 1; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++) {
2110 mid = left_edge[i] - right_edge[i];
2111 if (mid < mid_min) {
2112 mid_min = mid;
2113 min_index = i;
2114 }
2115 }
2116
2117 /*
2118 * -mid_min/2 represents the amount that we need to move DQS.
2119 * If mid_min is odd and positive we'll need to add one to
2120 * make sure the rounding in further calculations is correct
2121 * (always bias to the right), so just add 1 for all positive values.
2122 */
2123 if (mid_min > 0)
2124 mid_min++;
2125
2126 mid_min = mid_min / 2;
2127
2128 debug_cond(DLEVEL == 1, "%s:%d vfifo_center: mid_min=%d (index=%u)\n",
2129 __func__, __LINE__, mid_min, min_index);
2130
2131 /* Determine the amount we can change DQS (which is -mid_min) */
2132 orig_mid_min = mid_min;
2133 new_dqs = start_dqs - mid_min;
2134 if (new_dqs > IO_DQS_IN_DELAY_MAX)
2135 new_dqs = IO_DQS_IN_DELAY_MAX;
2136 else if (new_dqs < 0)
2137 new_dqs = 0;
2138
2139 mid_min = start_dqs - new_dqs;
2140 debug_cond(DLEVEL == 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
2141 mid_min, new_dqs);
2142
2143 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2144 if (start_dqs_en - mid_min > IO_DQS_EN_DELAY_MAX)
2145 mid_min += start_dqs_en - mid_min - IO_DQS_EN_DELAY_MAX;
2146 else if (start_dqs_en - mid_min < 0)
2147 mid_min += start_dqs_en - mid_min;
2148 }
2149 new_dqs = start_dqs - mid_min;
2150
2151 debug_cond(DLEVEL == 1, "vfifo_center: start_dqs=%d start_dqs_en=%d \
2152 new_dqs=%d mid_min=%d\n", start_dqs,
2153 IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS ? start_dqs_en : -1,
2154 new_dqs, mid_min);
2155
2156 /* Initialize data for export structures */
2157 dqs_margin = IO_IO_IN_DELAY_MAX + 1;
2158 dq_margin = IO_IO_IN_DELAY_MAX + 1;
2159
Dinh Nguyen3da42852015-06-02 22:52:49 -05002160 /* add delay to bring centre of all DQ windows to the same "level" */
2161 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
2162 /* Use values before divide by 2 to reduce round off error */
2163 shift_dq = (left_edge[i] - right_edge[i] -
2164 (left_edge[min_index] - right_edge[min_index]))/2 +
2165 (orig_mid_min - mid_min);
2166
2167 debug_cond(DLEVEL == 2, "vfifo_center: before: \
2168 shift_dq[%u]=%d\n", i, shift_dq);
2169
Marek Vasut1273dd92015-07-12 21:05:08 +02002170 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_IN_DELAY_OFFSET;
Marek Vasut17fdc912015-07-12 20:05:54 +02002171 temp_dq_in_delay1 = readl(addr + (p << 2));
2172 temp_dq_in_delay2 = readl(addr + (i << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05002173
2174 if (shift_dq + (int32_t)temp_dq_in_delay1 >
2175 (int32_t)IO_IO_IN_DELAY_MAX) {
 2176	 shift_dq = (int32_t)IO_IO_IN_DELAY_MAX - temp_dq_in_delay1;
2177 } else if (shift_dq + (int32_t)temp_dq_in_delay1 < 0) {
2178 shift_dq = -(int32_t)temp_dq_in_delay1;
2179 }
2180 debug_cond(DLEVEL == 2, "vfifo_center: after: \
2181 shift_dq[%u]=%d\n", i, shift_dq);
2182 final_dq[i] = temp_dq_in_delay1 + shift_dq;
Marek Vasut07aee5b2015-07-12 22:07:33 +02002183 scc_mgr_set_dq_in_delay(p, final_dq[i]);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002184 scc_mgr_load_dq(p);
2185
2186 debug_cond(DLEVEL == 2, "vfifo_center: margin[%u]=[%d,%d]\n", i,
2187 left_edge[i] - shift_dq + (-mid_min),
2188 right_edge[i] + shift_dq - (-mid_min));
2189 /* To determine values for export structures */
2190 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2191 dq_margin = left_edge[i] - shift_dq + (-mid_min);
2192
2193 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2194 dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2195 }
2196
2197 final_dqs = new_dqs;
2198 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS)
2199 final_dqs_en = start_dqs_en - mid_min;
2200
2201 /* Move DQS-en */
2202 if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
2203 scc_mgr_set_dqs_en_delay(read_group, final_dqs_en);
2204 scc_mgr_load_dqs(read_group);
2205 }
2206
2207 /* Move DQS */
2208 scc_mgr_set_dqs_bus_in_delay(read_group, final_dqs);
2209 scc_mgr_load_dqs(read_group);
2210 debug_cond(DLEVEL == 2, "%s:%d vfifo_center: dq_margin=%d \
2211 dqs_margin=%d", __func__, __LINE__,
2212 dq_margin, dqs_margin);
2213
2214 /*
2215 * Do not remove this line as it makes sure all of our decisions
2216 * have been applied. Apply the update bit.
2217 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002218 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002219
2220 return (dq_margin >= 0) && (dqs_margin >= 0);
2221}
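
/*
 * The per-bit deskew above reduces to: shift each DQ so its window
 * centre lines up with the narrowest bit's centre, clamped to the legal
 * delay-chain range. A minimal sketch of the clamp (clamp_shift_dq() is
 * a hypothetical helper mirroring the arithmetic above):
 */
static inline int32_t clamp_shift_dq(int32_t shift_dq, int32_t cur_delay)
{
	if (shift_dq + cur_delay > (int32_t)IO_IO_IN_DELAY_MAX)
		return (int32_t)IO_IO_IN_DELAY_MAX - cur_delay;
	if (shift_dq + cur_delay < 0)
		return -cur_delay;
	return shift_dq;
}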
2222
2223/*
2224 * calibrate the read valid prediction FIFO.
2225 *
2226 * - read valid prediction will consist of finding a good DQS enable phase,
2227 * DQS enable delay, DQS input phase, and DQS input delay.
2228 * - we also do a per-bit deskew on the DQ lines.
2229 */
2230static uint32_t rw_mgr_mem_calibrate_vfifo(uint32_t read_group,
2231 uint32_t test_bgn)
2232{
2233 uint32_t p, d, rank_bgn, sr;
2234 uint32_t dtaps_per_ptap;
2235 uint32_t tmp_delay;
2236 uint32_t bit_chk;
2237 uint32_t grp_calibrated;
2238 uint32_t write_group, write_test_bgn;
2239 uint32_t failed_substage;
2240
Marek Vasut7ac40d22015-06-26 18:56:54 +02002241 debug("%s:%d: %u %u\n", __func__, __LINE__, read_group, test_bgn);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002242
2243 /* update info for sims */
2244 reg_file_set_stage(CAL_STAGE_VFIFO);
2245
2246 write_group = read_group;
2247 write_test_bgn = test_bgn;
2248
2249 /* USER Determine number of delay taps for each phase tap */
2250 dtaps_per_ptap = 0;
2251 tmp_delay = 0;
2252 while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
2253 dtaps_per_ptap++;
2254 tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
2255 }
2256 dtaps_per_ptap--;
2257 tmp_delay = 0;
2258
2259 /* update info for sims */
2260 reg_file_set_group(read_group);
2261
2262 grp_calibrated = 0;
2263
2264 reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
2265 failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
2266
2267 for (d = 0; d <= dtaps_per_ptap && grp_calibrated == 0; d += 2) {
2268 /*
 2269	 * In RLDRAMX we may be messing with the delay of pins in
 2270	 * the same write group but outside of the current read
 2271	 * group, but that's ok because we haven't calibrated the
 2272	 * output side yet.
2273 */
2274 if (d > 0) {
2275 scc_mgr_apply_group_all_out_delay_add_all_ranks
2276 (write_group, write_test_bgn, d);
2277 }
2278
2279 for (p = 0; p <= IO_DQDQS_OUT_PHASE_MAX && grp_calibrated == 0;
2280 p++) {
2281 /* set a particular dqdqs phase */
2282 scc_mgr_set_dqdqs_output_phase_all_ranks(read_group, p);
2283
2284 debug_cond(DLEVEL == 1, "%s:%d calibrate_vfifo: g=%u \
2285 p=%u d=%u\n", __func__, __LINE__,
2286 read_group, p, d);
2287
2288 /*
2289 * Load up the patterns used by read calibration
2290 * using current DQDQS phase.
2291 */
2292 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2293 if (!(gbl->phy_debug_mode_flags &
2294 PHY_DEBUG_DISABLE_GUARANTEED_READ)) {
2295 if (!rw_mgr_mem_calibrate_read_test_patterns_all_ranks
2296 (read_group, 1, &bit_chk)) {
2297 debug_cond(DLEVEL == 1, "%s:%d Guaranteed read test failed:",
2298 __func__, __LINE__);
2299 debug_cond(DLEVEL == 1, " g=%u p=%u d=%u\n",
2300 read_group, p, d);
2301 break;
2302 }
2303 }
2304
2305/* case:56390 */
2306 grp_calibrated = 1;
2307 if (rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase_sweep_dq_in_delay
2308 (write_group, read_group, test_bgn)) {
2309 /*
2310 * USER Read per-bit deskew can be done on a
2311 * per shadow register basis.
2312 */
2313 for (rank_bgn = 0, sr = 0;
2314 rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2315 rank_bgn += NUM_RANKS_PER_SHADOW_REG,
2316 ++sr) {
2317 /*
2318 * Determine if this set of ranks
2319 * should be skipped entirely.
2320 */
2321 if (!param->skip_shadow_regs[sr]) {
2322 /*
2323 * If doing read after write
2324 * calibration, do not update
2325 * FOM, now - do it then.
2326 */
2327 if (!rw_mgr_mem_calibrate_vfifo_center
2328 (rank_bgn, write_group,
2329 read_group, test_bgn, 1, 0)) {
2330 grp_calibrated = 0;
2331 failed_substage =
2332 CAL_SUBSTAGE_VFIFO_CENTER;
2333 }
2334 }
2335 }
2336 } else {
2337 grp_calibrated = 0;
2338 failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
2339 }
2340 }
2341 }
2342
2343 if (grp_calibrated == 0) {
2344 set_failing_group_stage(write_group, CAL_STAGE_VFIFO,
2345 failed_substage);
2346 return 0;
2347 }
2348
2349 /*
2350 * Reset the delay chains back to zero if they have moved > 1
 2351	 * (check for > 1 because the loop increments d even when the
 2352	 * first case passes).
2353 */
2354 if (d > 2)
2355 scc_mgr_zero_group(write_group, write_test_bgn, 1);
2356
2357 return 1;
2358}
2359
2360/* VFIFO Calibration -- Read Deskew Calibration after write deskew */
2361static uint32_t rw_mgr_mem_calibrate_vfifo_end(uint32_t read_group,
2362 uint32_t test_bgn)
2363{
2364 uint32_t rank_bgn, sr;
2365 uint32_t grp_calibrated;
2366 uint32_t write_group;
2367
2368 debug("%s:%d %u %u", __func__, __LINE__, read_group, test_bgn);
2369
2370 /* update info for sims */
2371
2372 reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
2373 reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
2374
2375 write_group = read_group;
2376
2377 /* update info for sims */
2378 reg_file_set_group(read_group);
2379
2380 grp_calibrated = 1;
2381 /* Read per-bit deskew can be done on a per shadow register basis */
2382 for (rank_bgn = 0, sr = 0; rank_bgn < RW_MGR_MEM_NUMBER_OF_RANKS;
2383 rank_bgn += NUM_RANKS_PER_SHADOW_REG, ++sr) {
2384 /* Determine if this set of ranks should be skipped entirely */
2385 if (!param->skip_shadow_regs[sr]) {
2386 /* This is the last calibration round, update FOM here */
2387 if (!rw_mgr_mem_calibrate_vfifo_center(rank_bgn,
2388 write_group,
2389 read_group,
2390 test_bgn, 0,
2391 1)) {
2392 grp_calibrated = 0;
2393 }
2394 }
2395 }
2396
2397
2398 if (grp_calibrated == 0) {
2399 set_failing_group_stage(write_group,
2400 CAL_STAGE_VFIFO_AFTER_WRITES,
2401 CAL_SUBSTAGE_VFIFO_CENTER);
2402 return 0;
2403 }
2404
2405 return 1;
2406}
2407
2408/* Calibrate LFIFO to find smallest read latency */
2409static uint32_t rw_mgr_mem_calibrate_lfifo(void)
2410{
2411 uint32_t found_one;
2412 uint32_t bit_chk;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002413
2414 debug("%s:%d\n", __func__, __LINE__);
2415
2416 /* update info for sims */
2417 reg_file_set_stage(CAL_STAGE_LFIFO);
2418 reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
2419
2420 /* Load up the patterns used by read calibration for all ranks */
2421 rw_mgr_mem_calibrate_read_load_patterns(0, 1);
2422 found_one = 0;
2423
Dinh Nguyen3da42852015-06-02 22:52:49 -05002424 do {
Marek Vasut1273dd92015-07-12 21:05:08 +02002425 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002426 debug_cond(DLEVEL == 2, "%s:%d lfifo: read_lat=%u",
2427 __func__, __LINE__, gbl->curr_read_lat);
2428
2429 if (!rw_mgr_mem_calibrate_read_test_all_ranks(0,
2430 NUM_READ_TESTS,
2431 PASS_ALL_BITS,
2432 &bit_chk, 1)) {
2433 break;
2434 }
2435
2436 found_one = 1;
2437 /* reduce read latency and see if things are working */
2438 /* correctly */
2439 gbl->curr_read_lat--;
2440 } while (gbl->curr_read_lat > 0);
2441
2442 /* reset the fifos to get pointers to known state */
2443
Marek Vasut1273dd92015-07-12 21:05:08 +02002444 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002445
2446 if (found_one) {
2447 /* add a fudge factor to the read latency that was determined */
2448 gbl->curr_read_lat += 2;
Marek Vasut1273dd92015-07-12 21:05:08 +02002449 writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002450 debug_cond(DLEVEL == 2, "%s:%d lfifo: success: using \
2451 read_lat=%u\n", __func__, __LINE__,
2452 gbl->curr_read_lat);
2453 return 1;
2454 } else {
2455 set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
2456 CAL_SUBSTAGE_READ_LATENCY);
2457
2458 debug_cond(DLEVEL == 2, "%s:%d lfifo: failed at initial \
2459 read_lat=%u\n", __func__, __LINE__,
2460 gbl->curr_read_lat);
2461 return 0;
2462 }
2463}
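
/*
 * The LFIFO stage above is a linear minimisation: shrink the read
 * latency while all ranks still pass, then add a safety margin. A
 * minimal sketch (min_read_latency() and read_passes() are hypothetical
 * stand-ins for the loop and test above):
 */
static inline uint32_t min_read_latency(uint32_t lat,
					uint32_t (*read_passes)(uint32_t))
{
	while (lat > 0 && read_passes(lat))
		lat--;
	/* lat is now the first failing (or zero) latency; add the fudge */
	return lat + 2;
}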
2464
2465/*
 2466	 * Issue the write test command.
 2467	 * Two variants are provided: one that just tests a write pattern and
 2468	 * another that tests datamask functionality.
2469 */
2470static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
2471 uint32_t test_dm)
2472{
2473 uint32_t mcc_instruction;
2474 uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
2475 ENABLE_SUPER_QUICK_CALIBRATION);
2476 uint32_t rw_wl_nop_cycles;
2477 uint32_t addr;
2478
2479 /*
2480 * Set counter and jump addresses for the right
2481 * number of NOP cycles.
2482 * The number of supported NOP cycles can range from -1 to infinity
2483 * Three different cases are handled:
2484 *
2485 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
2486 * mechanism will be used to insert the right number of NOPs
2487 *
2488 * 2. For a number of NOP cycles equals to 0, the micro-instruction
2489 * issuing the write command will jump straight to the
2490 * micro-instruction that turns on DQS (for DDRx), or outputs write
2491 * data (for RLD), skipping
2492 * the NOP micro-instruction all together
2493 *
2494 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
2495 * turned on in the same micro-instruction that issues the write
2496 * command. Then we need
2497 * to directly jump to the micro-instruction that sends out the data
2498 *
2499 * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
2500 * (2 and 3). One jump-counter (0) is used to perform multiple
2501 * write-read operations.
2502 * one counter left to issue this command in "multiple-group" mode
2503 */
2504
2505 rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
2506
2507 if (rw_wl_nop_cycles == -1) {
2508 /*
2509 * CNTR 2 - We want to execute the special write operation that
2510 * turns on DQS right away and then skip directly to the
2511 * instruction that sends out the data. We set the counter to a
2512 * large number so that the jump is always taken.
2513 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002514 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002515
2516 /* CNTR 3 - Not used */
2517 if (test_dm) {
2518 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002519 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
Marek Vasut1273dd92015-07-12 21:05:08 +02002520 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002521 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
Marek Vasut1273dd92015-07-12 21:05:08 +02002522 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002523 } else {
2524 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
Marek Vasut1273dd92015-07-12 21:05:08 +02002525 writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
2526 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
2527 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2528 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002529 }
2530 } else if (rw_wl_nop_cycles == 0) {
2531 /*
2532 * CNTR 2 - We want to skip the NOP operation and go straight
2533 * to the DQS enable instruction. We set the counter to a large
2534 * number so that the jump is always taken.
2535 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002536 writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002537
2538 /* CNTR 3 - Not used */
2539 if (test_dm) {
2540 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002541 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
Marek Vasut1273dd92015-07-12 21:05:08 +02002542 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002543 } else {
2544 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
Marek Vasut1273dd92015-07-12 21:05:08 +02002545 writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
2546 &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002547 }
2548 } else {
2549 /*
2550 * CNTR 2 - In this case we want to execute the next instruction
2551 * and NOT take the jump. So we set the counter to 0. The jump
2552 * address doesn't count.
2553 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002554 writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
2555 writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002556
2557 /*
2558 * CNTR 3 - Set the nop counter to the number of cycles we
2559 * need to loop for, minus 1.
2560 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002561 writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002562 if (test_dm) {
2563 mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
Marek Vasut1273dd92015-07-12 21:05:08 +02002564 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
2565 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002566 } else {
2567 mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
Marek Vasut1273dd92015-07-12 21:05:08 +02002568 writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
2569 &sdr_rw_load_jump_mgr_regs->load_jump_add3);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002570 }
2571 }
2572
Marek Vasut1273dd92015-07-12 21:05:08 +02002573 writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
2574 RW_MGR_RESET_READ_DATAPATH_OFFSET);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002575
Dinh Nguyen3da42852015-06-02 22:52:49 -05002576 if (quick_write_mode)
Marek Vasut1273dd92015-07-12 21:05:08 +02002577 writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002578 else
Marek Vasut1273dd92015-07-12 21:05:08 +02002579 writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002580
Marek Vasut1273dd92015-07-12 21:05:08 +02002581 writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002582
2583 /*
2584 * CNTR 1 - This is used to ensure enough time elapses
2585 * for read data to come back.
2586 */
Marek Vasut1273dd92015-07-12 21:05:08 +02002587 writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002588
Dinh Nguyen3da42852015-06-02 22:52:49 -05002589 if (test_dm) {
Marek Vasut1273dd92015-07-12 21:05:08 +02002590 writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
2591 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002592 } else {
Marek Vasut1273dd92015-07-12 21:05:08 +02002593 writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
2594 &sdr_rw_load_jump_mgr_regs->load_jump_add1);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002595 }
2596
Marek Vasutc4815f72015-07-12 19:03:33 +02002597 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
Marek Vasut17fdc912015-07-12 20:05:54 +02002598 writel(mcc_instruction, addr + (group << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05002599}
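
/*
 * Condensed view of the rw_wl_nop_cycles cases handled above (summary
 * only, mirroring the code):
 *
 *	nop == -1: CNTR2 = 0xFF, jump2 -> DATA   (DQS on with the write,
 *		   skip straight to the data micro-instruction)
 *	nop ==  0: CNTR2 = 0xFF, jump2 -> DQS    (skip the NOP entirely)
 *	nop  >  0: CNTR2 = 0; CNTR3 = nop - 1, jump3 -> NOP
 *		   (loop the NOP micro-instruction)
 */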
2600
2601/* Test writes, can check for a single bit pass or multiple bit pass */
2602static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
2603 uint32_t write_group, uint32_t use_dm, uint32_t all_correct,
2604 uint32_t *bit_chk, uint32_t all_ranks)
2605{
Dinh Nguyen3da42852015-06-02 22:52:49 -05002606 uint32_t r;
2607 uint32_t correct_mask_vg;
2608 uint32_t tmp_bit_chk;
2609 uint32_t vg;
2610 uint32_t rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
2611 (rank_bgn + NUM_RANKS_PER_SHADOW_REG);
2612 uint32_t addr_rw_mgr;
2613 uint32_t base_rw_mgr;
2614
2615 *bit_chk = param->write_correct_mask;
2616 correct_mask_vg = param->write_correct_mask_vg;
2617
2618 for (r = rank_bgn; r < rank_end; r++) {
2619 if (param->skip_ranks[r]) {
2620 /* request to skip the rank */
2621 continue;
2622 }
2623
2624 /* set rank */
2625 set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
2626
2627 tmp_bit_chk = 0;
Marek Vasuta4bfa462015-07-12 17:52:36 +02002628 addr_rw_mgr = SDR_PHYGRP_RWMGRGRP_ADDRESS;
Dinh Nguyen3da42852015-06-02 22:52:49 -05002629 for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS-1; ; vg--) {
2630 /* reset the fifos to get pointers to known state */
Marek Vasut1273dd92015-07-12 21:05:08 +02002631 writel(0, &phy_mgr_cmd->fifo_reset);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002632
2633 tmp_bit_chk = tmp_bit_chk <<
2634 (RW_MGR_MEM_DQ_PER_WRITE_DQS /
2635 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
2636 rw_mgr_mem_calibrate_write_test_issue(write_group *
2637 RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS+vg,
2638 use_dm);
2639
Marek Vasut17fdc912015-07-12 20:05:54 +02002640 base_rw_mgr = readl(addr_rw_mgr);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002641 tmp_bit_chk = tmp_bit_chk | (correct_mask_vg & ~(base_rw_mgr));
2642 if (vg == 0)
2643 break;
2644 }
2645 *bit_chk &= tmp_bit_chk;
2646 }
2647
2648 if (all_correct) {
2649 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2650 debug_cond(DLEVEL == 2, "write_test(%u,%u,ALL) : %u == \
2651 %u => %lu", write_group, use_dm,
2652 *bit_chk, param->write_correct_mask,
2653 (long unsigned int)(*bit_chk ==
2654 param->write_correct_mask));
2655 return *bit_chk == param->write_correct_mask;
2656 } else {
2657 set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
2658 debug_cond(DLEVEL == 2, "write_test(%u,%u,ONE) : %u != ",
2659 write_group, use_dm, *bit_chk);
2660 debug_cond(DLEVEL == 2, "%lu" " => %lu", (long unsigned int)0,
2661 (long unsigned int)(*bit_chk != 0));
2662 return *bit_chk != 0x00;
2663 }
2664}
2665
2666/*
2667 * center all windows. do per-bit-deskew to possibly increase size of
2668 * certain windows.
2669 */
2670static uint32_t rw_mgr_mem_calibrate_writes_center(uint32_t rank_bgn,
2671 uint32_t write_group, uint32_t test_bgn)
2672{
2673 uint32_t i, p, min_index;
2674 int32_t d;
2675 /*
2676 * Store these as signed since there are comparisons with
2677 * signed numbers.
2678 */
2679 uint32_t bit_chk;
2680 uint32_t sticky_bit_chk;
2681 int32_t left_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2682 int32_t right_edge[RW_MGR_MEM_DQ_PER_WRITE_DQS];
2683 int32_t mid;
2684 int32_t mid_min, orig_mid_min;
2685 int32_t new_dqs, start_dqs, shift_dq;
2686 int32_t dq_margin, dqs_margin, dm_margin;
2687 uint32_t stop;
2688 uint32_t temp_dq_out1_delay;
2689 uint32_t addr;
2690
2691 debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
2692
2693 dm_margin = 0;
2694
Marek Vasutc4815f72015-07-12 19:03:33 +02002695 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
Marek Vasut17fdc912015-07-12 20:05:54 +02002696 start_dqs = readl(addr +
Dinh Nguyen3da42852015-06-02 22:52:49 -05002697 (RW_MGR_MEM_DQ_PER_WRITE_DQS << 2));
2698
2699 /* per-bit deskew */
2700
2701 /*
2702 * set the left and right edge of each bit to an illegal value
2703 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value.
2704 */
2705 sticky_bit_chk = 0;
2706 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2707 left_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2708 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2709 }
2710
2711 /* Search for the left edge of the window for each bit */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002712 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX; d++) {
2713 scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, d);
2714
Marek Vasut1273dd92015-07-12 21:05:08 +02002715 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002716
2717 /*
2718 * Stop searching when the read test doesn't pass AND when
2719 * we've seen a passing read on every bit.
2720 */
2721 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2722 0, PASS_ONE_BIT, &bit_chk, 0);
2723 sticky_bit_chk = sticky_bit_chk | bit_chk;
2724 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2725 debug_cond(DLEVEL == 2, "write_center(left): dtap=%d => %u \
2726 == %u && %u [bit_chk= %u ]\n",
2727 d, sticky_bit_chk, param->write_correct_mask,
2728 stop, bit_chk);
2729
2730 if (stop == 1) {
2731 break;
2732 } else {
2733 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2734 if (bit_chk & 1) {
2735 /*
2736 * Remember a passing test as the
2737 * left_edge.
2738 */
2739 left_edge[i] = d;
2740 } else {
2741 /*
2742 * If a left edge has not been seen
2743 * yet, then a future passing test will
2744 * mark this edge as the right edge.
2745 */
2746 if (left_edge[i] ==
2747 IO_IO_OUT1_DELAY_MAX + 1) {
2748 right_edge[i] = -(d + 1);
2749 }
2750 }
2751 debug_cond(DLEVEL == 2, "write_center[l,d=%d):", d);
2752 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2753 (int)(bit_chk & 1), i, left_edge[i]);
2754 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2755 right_edge[i]);
2756 bit_chk = bit_chk >> 1;
2757 }
2758 }
2759 }
2760
2761 /* Reset DQ delay chains to 0 */
2762 scc_mgr_apply_group_dq_out1_delay(write_group, test_bgn, 0);
2763 sticky_bit_chk = 0;
2764 for (i = RW_MGR_MEM_DQ_PER_WRITE_DQS - 1;; i--) {
2765 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2766 %d right_edge[%u]: %d\n", __func__, __LINE__,
2767 i, left_edge[i], i, right_edge[i]);
2768
2769 /*
2770 * Check for cases where we haven't found the left edge,
 2771	 * which makes our assignment of the right edge invalid.
2772 * Reset it to the illegal value.
2773 */
2774 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) &&
2775 (right_edge[i] != IO_IO_OUT1_DELAY_MAX + 1)) {
2776 right_edge[i] = IO_IO_OUT1_DELAY_MAX + 1;
2777 debug_cond(DLEVEL == 2, "%s:%d write_center: reset \
2778 right_edge[%u]: %d\n", __func__, __LINE__,
2779 i, right_edge[i]);
2780 }
2781
2782 /*
2783 * Reset sticky bit (except for bits where we have
2784 * seen the left edge).
2785 */
2786 sticky_bit_chk = sticky_bit_chk << 1;
2787 if ((left_edge[i] != IO_IO_OUT1_DELAY_MAX + 1))
2788 sticky_bit_chk = sticky_bit_chk | 1;
2789
2790 if (i == 0)
2791 break;
2792 }
2793
2794 /* Search for the right edge of the window for each bit */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002795 for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - start_dqs; d++) {
2796 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
2797 d + start_dqs);
2798
Marek Vasut1273dd92015-07-12 21:05:08 +02002799 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002800
2801 /*
2802 * Stop searching when the read test doesn't pass AND when
2803 * we've seen a passing read on every bit.
2804 */
2805 stop = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
2806 0, PASS_ONE_BIT, &bit_chk, 0);
2807
2808 sticky_bit_chk = sticky_bit_chk | bit_chk;
2809 stop = stop && (sticky_bit_chk == param->write_correct_mask);
2810
2811 debug_cond(DLEVEL == 2, "write_center (right): dtap=%u => %u == \
2812 %u && %u\n", d, sticky_bit_chk,
2813 param->write_correct_mask, stop);
2814
2815 if (stop == 1) {
2816 if (d == 0) {
2817 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS;
2818 i++) {
2819 /* d = 0 failed, but it passed when
2820 testing the left edge, so it must be
2821 marginal, set it to -1 */
2822 if (right_edge[i] ==
2823 IO_IO_OUT1_DELAY_MAX + 1 &&
2824 left_edge[i] !=
2825 IO_IO_OUT1_DELAY_MAX + 1) {
2826 right_edge[i] = -1;
2827 }
2828 }
2829 }
2830 break;
2831 } else {
2832 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2833 if (bit_chk & 1) {
2834 /*
2835 * Remember a passing test as
2836 * the right_edge.
2837 */
2838 right_edge[i] = d;
2839 } else {
2840 if (d != 0) {
2841 /*
2842 * If a right edge has not
2843 * been seen yet, then a future
2844 * passing test will mark this
2845 * edge as the left edge.
2846 */
2847 if (right_edge[i] ==
2848 IO_IO_OUT1_DELAY_MAX + 1)
2849 left_edge[i] = -(d + 1);
2850 } else {
2851 /*
2852 * d = 0 failed, but it passed
2853 * when testing the left edge,
2854 * so it must be marginal, set
2855 * it to -1.
2856 */
2857 if (right_edge[i] ==
2858 IO_IO_OUT1_DELAY_MAX + 1 &&
2859 left_edge[i] !=
2860 IO_IO_OUT1_DELAY_MAX + 1)
2861 right_edge[i] = -1;
2862 /*
2863 * If a right edge has not been
2864 * seen yet, then a future
2865 * passing test will mark this
2866 * edge as the left edge.
2867 */
2868 else if (right_edge[i] ==
2869 IO_IO_OUT1_DELAY_MAX +
2870 1)
2871 left_edge[i] = -(d + 1);
2872 }
2873 }
2874 debug_cond(DLEVEL == 2, "write_center[r,d=%d):", d);
2875 debug_cond(DLEVEL == 2, "bit_chk_test=%d left_edge[%u]: %d",
2876 (int)(bit_chk & 1), i, left_edge[i]);
2877 debug_cond(DLEVEL == 2, "right_edge[%u]: %d\n", i,
2878 right_edge[i]);
2879 bit_chk = bit_chk >> 1;
2880 }
2881 }
2882 }
2883
2884 /* Check that all bits have a window */
2885 for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2886 debug_cond(DLEVEL == 2, "%s:%d write_center: left_edge[%u]: \
2887 %d right_edge[%u]: %d", __func__, __LINE__,
2888 i, left_edge[i], i, right_edge[i]);
2889 if ((left_edge[i] == IO_IO_OUT1_DELAY_MAX + 1) ||
2890 (right_edge[i] == IO_IO_OUT1_DELAY_MAX + 1)) {
2891 set_failing_group_stage(test_bgn + i,
2892 CAL_STAGE_WRITES,
2893 CAL_SUBSTAGE_WRITES_CENTER);
2894 return 0;
2895 }
2896 }
2897
2898 /* Find middle of window for each DQ bit */
2899 mid_min = left_edge[0] - right_edge[0];
2900 min_index = 0;
2901 for (i = 1; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
2902 mid = left_edge[i] - right_edge[i];
2903 if (mid < mid_min) {
2904 mid_min = mid;
2905 min_index = i;
2906 }
2907 }
2908
2909 /*
2910 * -mid_min/2 represents the amount that we need to move DQS.
2911 * If mid_min is odd and positive we'll need to add one to
2912 * make sure the rounding in further calculations is correct
2913 * (always bias to the right), so just add 1 for all positive values.
2914 */
2915 if (mid_min > 0)
2916 mid_min++;
2917 mid_min = mid_min / 2;
2918 debug_cond(DLEVEL == 1, "%s:%d write_center: mid_min=%d\n", __func__,
2919 __LINE__, mid_min);
2920
2921 /* Determine the amount we can change DQS (which is -mid_min) */
2922 orig_mid_min = mid_min;
2923 new_dqs = start_dqs;
2924 mid_min = 0;
2925 debug_cond(DLEVEL == 1, "%s:%d write_center: start_dqs=%d new_dqs=%d \
2926 mid_min=%d\n", __func__, __LINE__, start_dqs, new_dqs, mid_min);
2927 /* Initialize data for export structures */
2928 dqs_margin = IO_IO_OUT1_DELAY_MAX + 1;
2929 dq_margin = IO_IO_OUT1_DELAY_MAX + 1;
2930
2931 /* add delay to bring centre of all DQ windows to the same "level" */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002932 for (i = 0, p = test_bgn; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++, p++) {
2933 /* Use values before divide by 2 to reduce round off error */
2934 shift_dq = (left_edge[i] - right_edge[i] -
2935 (left_edge[min_index] - right_edge[min_index]))/2 +
2936 (orig_mid_min - mid_min);
2937
2938 debug_cond(DLEVEL == 2, "%s:%d write_center: before: shift_dq \
2939 [%u]=%d\n", __func__, __LINE__, i, shift_dq);
2940
Marek Vasut1273dd92015-07-12 21:05:08 +02002941 addr = SDR_PHYGRP_SCCGRP_ADDRESS | SCC_MGR_IO_OUT1_DELAY_OFFSET;
Marek Vasut17fdc912015-07-12 20:05:54 +02002942 temp_dq_out1_delay = readl(addr + (i << 2));
Dinh Nguyen3da42852015-06-02 22:52:49 -05002943 if (shift_dq + (int32_t)temp_dq_out1_delay >
2944 (int32_t)IO_IO_OUT1_DELAY_MAX) {
2945 shift_dq = (int32_t)IO_IO_OUT1_DELAY_MAX - temp_dq_out1_delay;
2946 } else if (shift_dq + (int32_t)temp_dq_out1_delay < 0) {
2947 shift_dq = -(int32_t)temp_dq_out1_delay;
2948 }
2949 debug_cond(DLEVEL == 2, "write_center: after: shift_dq[%u]=%d\n",
2950 i, shift_dq);
Marek Vasut07aee5b2015-07-12 22:07:33 +02002951 scc_mgr_set_dq_out1_delay(i, temp_dq_out1_delay + shift_dq);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002952 scc_mgr_load_dq(i);
2953
2954 debug_cond(DLEVEL == 2, "write_center: margin[%u]=[%d,%d]\n", i,
2955 left_edge[i] - shift_dq + (-mid_min),
2956 right_edge[i] + shift_dq - (-mid_min));
2957 /* To determine values for export structures */
2958 if (left_edge[i] - shift_dq + (-mid_min) < dq_margin)
2959 dq_margin = left_edge[i] - shift_dq + (-mid_min);
2960
2961 if (right_edge[i] + shift_dq - (-mid_min) < dqs_margin)
2962 dqs_margin = right_edge[i] + shift_dq - (-mid_min);
2963 }
2964
2965 /* Move DQS */
2966 scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);
Marek Vasut1273dd92015-07-12 21:05:08 +02002967 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002968
2969 /* Centre DM */
2970 debug_cond(DLEVEL == 2, "%s:%d write_center: DM\n", __func__, __LINE__);
2971
2972 /*
2973 * set the left and right edge of each bit to an illegal value,
2974 * use (IO_IO_OUT1_DELAY_MAX + 1) as an illegal value,
2975 */
2976 left_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2977 right_edge[0] = IO_IO_OUT1_DELAY_MAX + 1;
2978 int32_t bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
2979 int32_t end_curr = IO_IO_OUT1_DELAY_MAX + 1;
2980 int32_t bgn_best = IO_IO_OUT1_DELAY_MAX + 1;
2981 int32_t end_best = IO_IO_OUT1_DELAY_MAX + 1;
2982 int32_t win_best = 0;
2983
2984 /* Search for the/part of the window with DM shift */
Dinh Nguyen3da42852015-06-02 22:52:49 -05002985 for (d = IO_IO_OUT1_DELAY_MAX; d >= 0; d -= DELTA_D) {
2986 scc_mgr_apply_group_dm_out1_delay(write_group, d);
Marek Vasut1273dd92015-07-12 21:05:08 +02002987 writel(0, &sdr_scc_mgr->update);
Dinh Nguyen3da42852015-06-02 22:52:49 -05002988
2989 if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
2990 PASS_ALL_BITS, &bit_chk,
2991 0)) {
 2992	 /* Set the current end of the window */
 2993	 end_curr = -d;
 2994	 /*
 2995	 * If a starting edge of our window has not been seen,
 2996	 * this is the current start of the DM window.
2997 */
2998 if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
2999 bgn_curr = -d;
3000
3001 /*
3002 * If current window is bigger than best seen.
3003 * Set best seen to be current window.
3004 */
3005 if ((end_curr-bgn_curr+1) > win_best) {
3006 win_best = end_curr-bgn_curr+1;
3007 bgn_best = bgn_curr;
3008 end_best = end_curr;
3009 }
3010 } else {
3011 /* We just saw a failing test. Reset temp edge */
3012 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3013 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3014 }
3015 }
3016
3017
3018 /* Reset DM delay chains to 0 */
3019 scc_mgr_apply_group_dm_out1_delay(write_group, 0);
3020
3021 /*
3022 * Check to see if the current window nudges up aganist 0 delay.
3023 * If so we need to continue the search by shifting DQS otherwise DQS
3024 * search begins as a new search. */
3025 if (end_curr != 0) {
3026 bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
3027 end_curr = IO_IO_OUT1_DELAY_MAX + 1;
3028 }

	/* Search for the window (or part of it) reachable with DQS shifts. */
	for (d = 0; d <= IO_IO_OUT1_DELAY_MAX - new_dqs; d += DELTA_D) {
		/*
		 * Note: this only shifts DQS, so we may be limiting
		 * ourselves to the width of the DQ window unnecessarily.
		 */
		scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
							d + new_dqs);

		writel(0, &sdr_scc_mgr->update);
		if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
						    PASS_ALL_BITS, &bit_chk,
						    0)) {
			/* Set current end of the window. */
			end_curr = d;
			/*
			 * If a beginning edge of our window has not been
			 * seen, this is our current beginning of the DM
			 * window.
			 */
			if (bgn_curr == IO_IO_OUT1_DELAY_MAX + 1)
				bgn_curr = d;

			/*
			 * If the current window is bigger than the best seen,
			 * set the best seen to be the current window.
			 */
			if ((end_curr - bgn_curr + 1) > win_best) {
				win_best = end_curr - bgn_curr + 1;
				bgn_best = bgn_curr;
				end_best = end_curr;
			}
		} else {
			/* We just saw a failing test. Reset temp edges. */
			bgn_curr = IO_IO_OUT1_DELAY_MAX + 1;
			end_curr = IO_IO_OUT1_DELAY_MAX + 1;

			/*
			 * Early exit optimization: if the remaining delay
			 * chain space is less than the largest window
			 * already seen, we can exit.
			 */
			if ((win_best - 1) >
			    (IO_IO_OUT1_DELAY_MAX - new_dqs - d))
				break;
		}
	}

	/* Assign left and right edge for cal and reporting. */
	left_edge[0] = -1 * bgn_best;
	right_edge[0] = end_best;

	debug_cond(DLEVEL == 2, "%s:%d dm_calib: left=%d right=%d\n", __func__,
		   __LINE__, left_edge[0], right_edge[0]);

	/* Move DQS (back to orig). */
	scc_mgr_apply_group_dqs_io_and_oct_out1(write_group, new_dqs);

	/* Move DM */

	/* Find middle of window for the DM bit. */
	mid = (left_edge[0] - right_edge[0]) / 2;

	/* Only move right, since we are not moving DQS/DQ. */
	if (mid < 0)
		mid = 0;
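	/*
	 * For example, left_edge[0] = 12 and right_edge[0] = 4 give
	 * mid = 4: moving DM right by 4 taps leaves 8 taps of margin on
	 * either side of the window.
	 */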

	/* dm_margin should fail if we never find a window. */
	if (win_best == 0)
		dm_margin = -1;
	else
		dm_margin = left_edge[0] - mid;

	scc_mgr_apply_group_dm_out1_delay(write_group, mid);
	writel(0, &sdr_scc_mgr->update);

	debug_cond(DLEVEL == 2,
		   "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
		   __func__, __LINE__, left_edge[0], right_edge[0], mid,
		   dm_margin);

	/* Export values. */
	gbl->fom_out += dq_margin + dqs_margin;

	debug_cond(DLEVEL == 2,
		   "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
		   __func__, __LINE__, dq_margin, dqs_margin, dm_margin);

	/*
	 * Do not remove this line as it makes sure all of our
	 * decisions have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return (dq_margin >= 0) && (dqs_margin >= 0) && (dm_margin >= 0);
}

/* Calibrate the write operations. */
static uint32_t rw_mgr_mem_calibrate_writes(uint32_t rank_bgn, uint32_t g,
					    uint32_t test_bgn)
{
	/* Update info for sims. */
	debug("%s:%d %u %u\n", __func__, __LINE__, g, test_bgn);

	reg_file_set_stage(CAL_STAGE_WRITES);
	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);

	reg_file_set_group(g);

	if (!rw_mgr_mem_calibrate_writes_center(rank_bgn, g, test_bgn)) {
		set_failing_group_stage(g, CAL_STAGE_WRITES,
					CAL_SUBSTAGE_WRITES_CENTER);
		return 0;
	}

	return 1;
}

/* Precharge all banks and activate row 0 in bank "000..." and bank "111...". */
static void mem_precharge_and_activate(void)
{
	uint32_t r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
		/* Skip this rank if requested. */
		if (param->skip_ranks[r])
			continue;

		/* Set rank. */
		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);

		/* Precharge all banks. */
		writel(RW_MGR_PRECHARGE_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
					     RW_MGR_RUN_SINGLE_GROUP_OFFSET);

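		/*
		 * Set up the loop counters and jump addresses consumed
		 * by the RW_MGR_ACTIVATE_0_AND_1 sequence issued below.
		 */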
		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT1,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);

		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
		writel(RW_MGR_ACTIVATE_0_AND_1_WAIT2,
		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);

		/* Activate rows. */
		writel(RW_MGR_ACTIVATE_0_AND_1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	}
}

/* Configure various memory related parameters. */
static void mem_config(void)
{
	uint32_t rlat, wlat;
	uint32_t rw_wl_nop_cycles;
	uint32_t max_latency;

	debug("%s:%d\n", __func__, __LINE__);

	/* Read in write and read latency. */
	wlat = readl(&data_mgr->t_wl_add);
	wlat += readl(&data_mgr->mem_t_add);

	/* WL for hard phy does not include additive latency. */

	/*
	 * Add additional write latency to offset the address/command extra
	 * clock cycle. We change the AC mux setting causing AC to be delayed
	 * by one mem clock cycle. Only do this for DDR3.
	 */
	wlat = wlat + 1;

	rlat = readl(&data_mgr->t_rl_add);

	rw_wl_nop_cycles = wlat - 2;
	gbl->rw_wl_nop_cycles = rw_wl_nop_cycles;

	/*
	 * For AV/CV, lfifo is hardened and always runs at full rate so
	 * max latency in AFI clocks, used here, is correspondingly smaller.
	 */
	max_latency = (1 << MAX_LATENCY_COUNT_WIDTH) - 1;
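	/*
	 * (1 << MAX_LATENCY_COUNT_WIDTH) - 1 is simply the largest value
	 * a MAX_LATENCY_COUNT_WIDTH-bit latency counter can hold.
	 */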

	/* Configure for a burst length of 8. */

	/* Write latency: adjust for the Hard PHY. */
	wlat = wlat + 1;

	/* Set a pretty high read latency initially. */
	gbl->curr_read_lat = rlat + 16;

	if (gbl->curr_read_lat > max_latency)
		gbl->curr_read_lat = max_latency;

	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);

	/* Advertise write latency. */
	gbl->curr_write_lat = wlat;
	writel(wlat - 2, &phy_mgr_cfg->afi_wlat);

	/* Initialize bit slips. */
	mem_precharge_and_activate();
}

/* Set VFIFO and LFIFO to instant-on settings in skip calibration mode. */
static void mem_skip_calibrate(void)
{
	uint32_t vfifo_offset;
	uint32_t i, j, r;

	debug("%s:%d\n", __func__, __LINE__);

	/* Need to update every shadow register set used by the interface. */
	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/*
		 * Set output phase alignment settings appropriate for
		 * skip calibration.
		 */
		for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
			scc_mgr_set_dqs_en_phase(i, 0);
#if IO_DLL_CHAIN_LENGTH == 6
			scc_mgr_set_dqdqs_output_phase(i, 6);
#else
			scc_mgr_set_dqdqs_output_phase(i, 7);
#endif
			/*
			 * Case:33398
			 *
			 * Write data arrives to the I/O two cycles before write
			 * latency is reached (720 deg).
			 *   -> due to bit-slip in a/c bus
			 *   -> to allow board skew where dqs is longer than ck
			 *      -> how often can this happen!?
			 *      -> can claim back some ptaps for high freq
			 *         support if we can relax this, but i digress...
			 *
			 * The write_clk leads mem_ck by 90 deg.
			 * The minimum ptap of the OPA is 180 deg.
			 * Each ptap has (360 / IO_DLL_CHAIN_LENGTH) deg of delay.
			 * The write_clk is always delayed by 2 ptaps.
			 *
			 * Hence, to make DQS aligned to CK, we need to delay
			 * DQS by:
			 *   (720 - 90 - 180 - 2 * (360 / IO_DLL_CHAIN_LENGTH))
			 *
			 * Dividing the above by (360 / IO_DLL_CHAIN_LENGTH)
			 * gives us the number of ptaps, which simplifies to:
			 *   (1.25 * IO_DLL_CHAIN_LENGTH - 2)
			 */
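			/*
			 * For example, with IO_DLL_CHAIN_LENGTH = 8 each
			 * ptap is 360 / 8 = 45 deg, so the delay needed is
			 * (720 - 90 - 180 - 2 * 45) / 45 = 8 ptaps, which
			 * matches 1.25 * 8 - 2 = 8.
			 */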
			scc_mgr_set_dqdqs_output_phase(i, (1.25 *
				IO_DLL_CHAIN_LENGTH - 2));
		}
		writel(0xff, &sdr_scc_mgr->dqs_ena);
		writel(0xff, &sdr_scc_mgr->dqs_io_ena);

		for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
			writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
				  SCC_MGR_GROUP_COUNTER_OFFSET);
		}
		writel(0xff, &sdr_scc_mgr->dq_ena);
		writel(0xff, &sdr_scc_mgr->dm_ena);
		writel(0, &sdr_scc_mgr->update);
	}

	/* Compensate for simulation model behaviour. */
	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		scc_mgr_set_dqs_bus_in_delay(i, 10);
		scc_mgr_load_dqs(i);
	}
	writel(0, &sdr_scc_mgr->update);

	/*
	 * ArriaV has hard FIFOs that can only be initialized by incrementing
	 * in sequencer.
	 */
	vfifo_offset = CALIB_VFIFO_OFFSET;
	for (j = 0; j < vfifo_offset; j++)
		writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy);
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * For ACV with hard lfifo, we get the skip-cal setting from
	 * generation-time constant.
	 */
	gbl->curr_read_lat = CALIB_LFIFO_OFFSET;
	writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
}

/* Memory calibration entry point. */
static uint32_t mem_calibrate(void)
{
	uint32_t i;
	uint32_t rank_bgn, sr;
	uint32_t write_group, write_test_bgn;
	uint32_t read_group, read_test_bgn;
	uint32_t run_groups, current_run;
	uint32_t failing_groups = 0;
	uint32_t group_failed = 0;
	uint32_t sr_failed = 0;

	debug("%s:%d\n", __func__, __LINE__);

	/* Initialize the data settings. */
	gbl->error_substage = CAL_SUBSTAGE_NIL;
	gbl->error_stage = CAL_STAGE_NIL;
	gbl->error_group = 0xff;
	gbl->fom_in = 0;
	gbl->fom_out = 0;

	mem_config();

	uint32_t bypass_mode = 0x1;

	for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
		writel(i, SDR_PHYGRP_SCCGRP_ADDRESS |
			  SCC_MGR_GROUP_COUNTER_OFFSET);
		scc_set_bypass_mode(i, bypass_mode);
	}

	if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) {
		/*
		 * Set VFIFO and LFIFO to instant-on settings in skip
		 * calibration mode.
		 */
		mem_skip_calibrate();
	} else {
		for (i = 0; i < NUM_CALIB_REPEAT; i++) {
			/*
			 * Zero all delay chain/phase settings for all
			 * groups and all shadow register sets.
			 */
			scc_mgr_zero_all();

			run_groups = ~param->skip_groups;

			for (write_group = 0, write_test_bgn = 0; write_group
			     < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; write_group++,
			     write_test_bgn += RW_MGR_MEM_DQ_PER_WRITE_DQS) {
				/* Initialize the group failure flag. */
				group_failed = 0;

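				/*
				 * Peel this write group's run/skip bits off
				 * the low end of run_groups, then advance
				 * run_groups to the next group.
				 */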
				current_run = run_groups & ((1 <<
					RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1);
				run_groups = run_groups >>
					RW_MGR_NUM_DQS_PER_WRITE_GROUP;

				if (current_run == 0)
					continue;

				writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS |
					SCC_MGR_GROUP_COUNTER_OFFSET);
				scc_mgr_zero_group(write_group, write_test_bgn,
						   0);

				for (read_group = write_group *
				     RW_MGR_MEM_IF_READ_DQS_WIDTH /
				     RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
				     read_test_bgn = 0;
				     read_group < (write_group + 1) *
				     RW_MGR_MEM_IF_READ_DQS_WIDTH /
				     RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
				     group_failed == 0;
				     read_group++, read_test_bgn +=
				     RW_MGR_MEM_DQ_PER_READ_DQS) {
					/* Calibrate the VFIFO. */
					if (!((STATIC_CALIB_STEPS) &
					      CALIB_SKIP_VFIFO)) {
						if (!rw_mgr_mem_calibrate_vfifo
						    (read_group,
						     read_test_bgn)) {
							group_failed = 1;

							if (!(gbl->
							    phy_debug_mode_flags &
							    PHY_DEBUG_SWEEP_ALL_GROUPS))
								return 0;
						}
					}
				}

				/* Calibrate the output side. */
				if (group_failed == 0) {
					for (rank_bgn = 0, sr = 0; rank_bgn
					     < RW_MGR_MEM_NUMBER_OF_RANKS;
					     rank_bgn +=
					     NUM_RANKS_PER_SHADOW_REG,
					     ++sr) {
						sr_failed = 0;
						if (!((STATIC_CALIB_STEPS) &
						      CALIB_SKIP_WRITES)) {
							if ((STATIC_CALIB_STEPS)
							    & CALIB_SKIP_DELAY_SWEEPS) {
								/* Not needed in quick mode! */
							} else {
								/*
								 * Determine if this set of
								 * ranks should be skipped
								 * entirely.
								 */
								if (!param->skip_shadow_regs[sr]) {
									if (!rw_mgr_mem_calibrate_writes
									    (rank_bgn, write_group,
									     write_test_bgn)) {
										sr_failed = 1;
										if (!(gbl->
										    phy_debug_mode_flags &
										    PHY_DEBUG_SWEEP_ALL_GROUPS))
											return 0;
									}
								}
							}
						}
						if (sr_failed != 0)
							group_failed = 1;
					}
				}

				if (group_failed == 0) {
					for (read_group = write_group *
					     RW_MGR_MEM_IF_READ_DQS_WIDTH /
					     RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
					     read_test_bgn = 0;
					     read_group < (write_group + 1)
					     * RW_MGR_MEM_IF_READ_DQS_WIDTH
					     / RW_MGR_MEM_IF_WRITE_DQS_WIDTH &&
					     group_failed == 0;
					     read_group++, read_test_bgn +=
					     RW_MGR_MEM_DQ_PER_READ_DQS) {
						if (!((STATIC_CALIB_STEPS) &
						      CALIB_SKIP_WRITES)) {
							if (!rw_mgr_mem_calibrate_vfifo_end
							    (read_group, read_test_bgn)) {
								group_failed = 1;

								if (!(gbl->phy_debug_mode_flags
								      & PHY_DEBUG_SWEEP_ALL_GROUPS))
									return 0;
							}
						}
					}
				}

				if (group_failed != 0)
					failing_groups++;
			}

			/*
			 * If there are any failing groups then report
			 * the failure.
			 */
			if (failing_groups != 0)
				return 0;

			/* Calibrate the LFIFO. */
			if (!((STATIC_CALIB_STEPS) & CALIB_SKIP_LFIFO)) {
				/*
				 * If we're skipping groups as part of debug,
				 * don't calibrate LFIFO.
				 */
				if (param->skip_groups == 0) {
					if (!rw_mgr_mem_calibrate_lfifo())
						return 0;
				}
			}
		}
	}

	/*
	 * Do not remove this line as it makes sure all of our decisions
	 * have been applied.
	 */
	writel(0, &sdr_scc_mgr->update);
	return 1;
}

static uint32_t run_mem_calibrate(void)
{
	uint32_t pass;
	uint32_t debug_info;

	debug("%s:%d\n", __func__, __LINE__);

	/* Reset pass/fail status shown on afi_cal_success/fail. */
	writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status);

	/* Stop the tracking manager. */
	uint32_t ctrlcfg = readl(&sdr_ctrl->ctrl_cfg);

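	/*
	 * The 0xFFBFFFFF mask below clears bit 22 of ctrl_cfg, which is
	 * presumably the tracking manager enable; the saved value is
	 * written back once calibration completes.
	 */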
	writel(ctrlcfg & 0xFFBFFFFF, &sdr_ctrl->ctrl_cfg);

	initialize();
	rw_mgr_mem_initialize();

	pass = mem_calibrate();

	mem_precharge_and_activate();
	writel(0, &phy_mgr_cmd->fifo_reset);

	/*
	 * Handoff:
	 * Don't return control of the PHY back to AFI when in debug mode.
	 */
	if ((gbl->phy_debug_mode_flags & PHY_DEBUG_IN_DEBUG_MODE) == 0) {
		rw_mgr_mem_handoff();
		/*
		 * In Hard PHY this is a 2-bit control:
		 * 0: AFI Mux Select
		 * 1: DDIO Mux Select
		 */
		writel(0x2, &phy_mgr_cfg->mux_sel);
	}

	writel(ctrlcfg, &sdr_ctrl->ctrl_cfg);

	if (pass) {
		printf("%s: CALIBRATION PASSED\n", __FILE__);

		gbl->fom_in /= 2;
		gbl->fom_out /= 2;

		if (gbl->fom_in > 0xff)
			gbl->fom_in = 0xff;

		if (gbl->fom_out > 0xff)
			gbl->fom_out = 0xff;

		/* Update the FOM in the register file. */
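		/* debug_info layout: bits [7:0] = fom_in, [15:8] = fom_out. */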
		debug_info = gbl->fom_in;
		debug_info |= gbl->fom_out << 8;
		writel(debug_info, &sdr_reg_file->fom);

		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
	} else {
		printf("%s: CALIBRATION FAILED\n", __FILE__);

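		/*
		 * debug_info layout: bits [7:0] = error stage,
		 * [15:8] = error substage, [23:16] = error group.
		 */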
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;

		writel(debug_info, &sdr_reg_file->failing_stage);
		writel(debug_info, &phy_mgr_cfg->cal_debug_info);
		writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);

		/* Update the failing group/stage in the register file. */
		debug_info = gbl->error_stage;
		debug_info |= gbl->error_substage << 8;
		debug_info |= gbl->error_group << 16;
		writel(debug_info, &sdr_reg_file->failing_stage);
	}

	return pass;
}

/**
 * hc_initialize_rom_data() - Initialize ROM data
 *
 * Initialize ROM data.
 */
static void hc_initialize_rom_data(void)
{
	u32 i, addr;

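	/* Each ROM entry is one 32-bit word, hence the (i << 2) byte offset. */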
	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(inst_rom_init); i++)
		writel(inst_rom_init[i], addr + (i << 2));

	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
	for (i = 0; i < ARRAY_SIZE(ac_rom_init); i++)
		writel(ac_rom_init[i], addr + (i << 2));
}

/**
 * initialize_reg_file() - Initialize SDR register file
 *
 * Initialize SDR register file.
 */
static void initialize_reg_file(void)
{
	/* Initialize the register file with the correct data. */
	writel(REG_FILE_INIT_SEQ_SIGNATURE, &sdr_reg_file->signature);
	writel(0, &sdr_reg_file->debug_data_addr);
	writel(0, &sdr_reg_file->cur_stage);
	writel(0, &sdr_reg_file->fom);
	writel(0, &sdr_reg_file->failing_stage);
	writel(0, &sdr_reg_file->debug1);
	writel(0, &sdr_reg_file->debug2);
}

/**
 * initialize_hps_phy() - Initialize HPS PHY
 *
 * Initialize HPS PHY.
 */
static void initialize_hps_phy(void)
{
	uint32_t reg;
	/*
	 * Tracking also gets configured here because it's in the
	 * same register.
	 */
	uint32_t trk_sample_count = 7500;
	/*
	 * Format is number of outer loops in the 16 MSB, sample
	 * count in 16 LSB.
	 */
	uint32_t trk_long_idle_sample_count = (10 << 16) | 100;

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
	/*
	 * This field selects the intrinsic latency to RDATA_EN/FULL path.
	 * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
	 */
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET(
		trk_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl0);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET(
		trk_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH);
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET(
		trk_long_idle_sample_count);
	writel(reg, &sdr_ctrl->phy_ctrl1);

	reg = 0;
	reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET(
		trk_long_idle_sample_count >>
		SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH);
	writel(reg, &sdr_ctrl->phy_ctrl2);
}

static void initialize_tracking(void)
{
	uint32_t concatenated_longidle = 0x0;
	uint32_t concatenated_delays = 0x0;
	uint32_t concatenated_rw_addr = 0x0;
	uint32_t concatenated_refresh = 0x0;
	uint32_t trk_sample_count = 7500;
	uint32_t dtaps_per_ptap;
	uint32_t tmp_delay;

	/*
	 * Compute usable version of value in case we skip full
	 * computation later.
	 */
	dtaps_per_ptap = 0;
	tmp_delay = 0;
	while (tmp_delay < IO_DELAY_PER_OPA_TAP) {
		dtaps_per_ptap++;
		tmp_delay += IO_DELAY_PER_DCHAIN_TAP;
	}
	dtaps_per_ptap--;
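	/*
	 * The loop above computes ceil(IO_DELAY_PER_OPA_TAP /
	 * IO_DELAY_PER_DCHAIN_TAP) - 1; e.g. with illustrative values of
	 * 312 ps per ptap and 25 ps per dtap it yields 12.
	 */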

	concatenated_longidle = concatenated_longidle ^ 10;
					/* longidle outer loop */
	concatenated_longidle = concatenated_longidle << 16;
	concatenated_longidle = concatenated_longidle ^ 100;
					/* longidle sample count */

	concatenated_delays = concatenated_delays ^ 243;
					/* trfc, worst case of 933MHz 4Gb */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 14;
					/* trcd, worst case */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 10;
					/* vfifo wait */
	concatenated_delays = concatenated_delays << 8;
	concatenated_delays = concatenated_delays ^ 4;
					/* mux delay */

	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_IDLE;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_ACTIVATE_1;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_SGLE_READ;
	concatenated_rw_addr = concatenated_rw_addr << 8;
	concatenated_rw_addr = concatenated_rw_addr ^ RW_MGR_PRECHARGE_ALL;

	concatenated_refresh = concatenated_refresh ^ RW_MGR_REFRESH_ALL;
	concatenated_refresh = concatenated_refresh << 24;
	concatenated_refresh = concatenated_refresh ^ 1000; /* trefi */
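	/*
	 * The shift/XOR sequences above are plain field packing, since each
	 * XOR lands in previously-zeroed bits; e.g. concatenated_longidle
	 * ends up as (10 << 16) | 100 and concatenated_refresh as
	 * (RW_MGR_REFRESH_ALL << 24) | 1000, assuming each opcode fits in
	 * its 8-bit field.
	 */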

	/* Initialize the register file with the correct data. */
	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
	writel(trk_sample_count, &sdr_reg_file->trk_sample_count);
	writel(concatenated_longidle, &sdr_reg_file->trk_longidle);
	writel(concatenated_delays, &sdr_reg_file->delays);
	writel(concatenated_rw_addr, &sdr_reg_file->trk_rw_mgr_addr);
	writel(RW_MGR_MEM_IF_READ_DQS_WIDTH, &sdr_reg_file->trk_read_dqs_width);
	writel(concatenated_refresh, &sdr_reg_file->trk_rfsh);
}

int sdram_calibration_full(void)
{
	struct param_type my_param;
	struct gbl_type my_gbl;
	uint32_t pass;
	uint32_t i;

	param = &my_param;
	gbl = &my_gbl;

	/* Initialize the debug mode flags. */
	gbl->phy_debug_mode_flags = 0;
	/* Set calibration enabled by default. */
	gbl->phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT;
	/*
	 * Only sweep all groups (regardless of fail state) by default.
	 * Set enabled read test by default.
	 */
#if DISABLE_GUARANTEED_READ
	gbl->phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ;
#endif
	/* Initialize the register file. */
	initialize_reg_file();

	/* Initialize any PHY CSR. */
	initialize_hps_phy();

	scc_mgr_initialize();

	initialize_tracking();

	/* Enable all ranks, groups. */
	for (i = 0; i < RW_MGR_MEM_NUMBER_OF_RANKS; i++)
		param->skip_ranks[i] = 0;
	for (i = 0; i < NUM_SHADOW_REGS; ++i)
		param->skip_shadow_regs[i] = 0;
	param->skip_groups = 0;

	printf("%s: Preparing to start memory calibration\n", __FILE__);

	debug("%s:%d\n", __func__, __LINE__);
	debug_cond(DLEVEL == 1,
		   "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ",
		   RW_MGR_MEM_NUMBER_OF_RANKS, RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM,
		   RW_MGR_MEM_DQ_PER_READ_DQS, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS,
		   RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS);
	debug_cond(DLEVEL == 1,
		   "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ",
		   RW_MGR_MEM_IF_READ_DQS_WIDTH, RW_MGR_MEM_IF_WRITE_DQS_WIDTH,
		   RW_MGR_MEM_DATA_WIDTH, RW_MGR_MEM_DATA_MASK_WIDTH,
		   IO_DELAY_PER_OPA_TAP, IO_DELAY_PER_DCHAIN_TAP);
	debug_cond(DLEVEL == 1, "dtap_dqsen_delay=%u, dll=%u",
		   IO_DELAY_PER_DQS_EN_DCHAIN_TAP, IO_DLL_CHAIN_LENGTH);
	debug_cond(DLEVEL == 1,
		   "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ",
		   IO_DQS_EN_PHASE_MAX, IO_DQDQS_OUT_PHASE_MAX,
		   IO_DQS_EN_DELAY_MAX, IO_DQS_IN_DELAY_MAX);
	debug_cond(DLEVEL == 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ",
		   IO_IO_IN_DELAY_MAX, IO_IO_OUT1_DELAY_MAX,
		   IO_IO_OUT2_DELAY_MAX);
	debug_cond(DLEVEL == 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n",
		   IO_DQS_IN_RESERVE, IO_DQS_OUT_RESERVE);

	hc_initialize_rom_data();

	/* Update info for sims. */
	reg_file_set_stage(CAL_STAGE_NIL);
	reg_file_set_group(0);

	/*
	 * Load global needed for those actions that require
	 * some dynamic calibration support.
	 */
	dyn_calib_steps = STATIC_CALIB_STEPS;
	/*
	 * Load global to allow dynamic selection of delay loop settings
	 * based on calibration mode.
	 */
	if (!(dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS))
		skip_delay_mask = 0xff;
	else
		skip_delay_mask = 0x0;

	pass = run_mem_calibrate();

	printf("%s: Calibration complete\n", __FILE__);
	return pass;
}