// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2014 Gateworks Corporation
 * Author: Tim Harvey <tharvey@gateworks.com>
 */

#include <common.h>
#include <linux/types.h>
#include <asm/arch/clock.h>
#include <asm/arch/mx6-ddr.h>
#include <asm/arch/sys_proto.h>
#include <asm/io.h>
#include <asm/types.h>
#include <wait_bit.h>

#if defined(CONFIG_MX6_DDRCAL)
static void reset_read_data_fifos(void)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	/* Reset data FIFOs twice. */
	setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);

	setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);
}

static void precharge_all(const bool cs0_enable, const bool cs1_enable)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	/*
	 * Issue the Precharge-All command to the DDR device for both
	 * chip selects. Note, CON_REQ bit should also remain set. If
	 * only using one chip select, then precharge only the desired
	 * chip select.
	 */
	if (cs0_enable) { /* CS0 */
		writel(0x04008050, &mmdc0->mdscr);
		wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
	}

	if (cs1_enable) { /* CS1 */
		writel(0x04008058, &mmdc0->mdscr);
		wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
	}
}

static void force_delay_measurement(int bus_size)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;

	writel(0x800, &mmdc0->mpmur0);
	if (bus_size == 0x2)
		writel(0x800, &mmdc1->mpmur0);
}

static void modify_dg_result(u32 *reg_st0, u32 *reg_st1, u32 *reg_ctrl)
{
	u32 dg_tmp_val, dg_dl_abs_offset, dg_hc_del, val_ctrl;

	/*
	 * DQS gating absolute offset should be modified from reflecting
	 * (HW_DG_LOWx + HW_DG_UPx)/2 to reflecting (HW_DG_UPx - 0x80)
	 */
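	/*
	 * Worked example (illustration only, derived from the masks used
	 * below): if bits [26:16] of the status register read back 0x1a0,
	 * then 0x1a0 - 0xc0 = 0xe0, so dg_dl_abs_offset = 0xe0 & 0x7f = 0x60
	 * and dg_hc_del = (0xe0 & 0x780) << 1 = 0x100, i.e. 0x160 gets merged
	 * into the control register field.
	 */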

	val_ctrl = readl(reg_ctrl);
	val_ctrl &= 0xf0000000;

	dg_tmp_val = ((readl(reg_st0) & 0x07ff0000) >> 16) - 0xc0;
	dg_dl_abs_offset = dg_tmp_val & 0x7f;
	dg_hc_del = (dg_tmp_val & 0x780) << 1;

	val_ctrl |= dg_dl_abs_offset + dg_hc_del;

	dg_tmp_val = ((readl(reg_st1) & 0x07ff0000) >> 16) - 0xc0;
	dg_dl_abs_offset = dg_tmp_val & 0x7f;
	dg_hc_del = (dg_tmp_val & 0x780) << 1;

	val_ctrl |= (dg_dl_abs_offset + dg_hc_del) << 16;

	writel(val_ctrl, reg_ctrl);
}

static void correct_mpwldectr_result(void *reg)
{
	/* Limit is 200/256 of CK, which is WL_HC_DELx | 0x48. */
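	/* 0x148 = 0x100 (the half-cycle bit, 128/256 CK) + 0x48 (72/256 CK) = 200/256 CK. */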
	const unsigned int limit = 0x148;
	u32 val = readl(reg);
	u32 old = val;

	if ((val & 0x17f) > limit)
		val &= 0xffff << 16;

	if (((val >> 16) & 0x17f) > limit)
		val &= 0xffff;

	if (old != val)
		writel(val, reg);
}

int mmdc_do_write_level_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	u32 esdmisc_val, zq_val;
	u32 errors = 0;
	u32 ldectrl[4] = {0};
	u32 ddr_mr1 = 0x4;
	u32 rwalat_max;

	/*
	 * Stash old values in case calibration fails,
	 * we need to restore them
	 */
	ldectrl[0] = readl(&mmdc0->mpwldectrl0);
	ldectrl[1] = readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		ldectrl[2] = readl(&mmdc1->mpwldectrl0);
		ldectrl[3] = readl(&mmdc1->mpwldectrl1);
	}

	/* disable DDR logic power down timer */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer */
	setbits_le32(&mmdc0->mapsr, 0x1);

	debug("Starting write leveling calibration.\n");

	/*
	 * 2. disable auto refresh and ZQ calibration
	 * before proceeding with Write Leveling calibration
	 */
	esdmisc_val = readl(&mmdc0->mdref);
	writel(0x0000C000, &mmdc0->mdref);
	zq_val = readl(&mmdc0->mpzqhwctrl);
	writel(zq_val & ~0x3, &mmdc0->mpzqhwctrl);

	/* 3. increase walat and ralat to maximum */
	rwalat_max = (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17);
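	/*
	 * As in mx6_ddr3_cfg() below (walat << 16, ralat << 6), bits 8:6
	 * form the MDMISC RALAT field and bits 17:16 the WALAT field, so
	 * this sets both latencies to their maximum.
	 */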
	setbits_le32(&mmdc0->mdmisc, rwalat_max);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mdmisc, rwalat_max);
	/*
	 * 4 & 5. Configure the external DDR device to enter write-leveling
	 * mode through Load Mode Register command.
	 * Register setting:
	 * Bits[31:16] MR1 value (0x0080 write leveling enable)
	 * Bit[9] set WL_EN to enable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
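	/*
	 * Per the field description above, 0x00808231 decodes as: MR1 value
	 * 0x0080 in bits 31:16, CON_REQ (bit 15), WL_EN (bit 9), CMD = 0x3
	 * and CMD_BA = 0x1.
	 */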
	writel(0x00808231, &mmdc0->mdscr);

	/* 6. Activate automatic calibration by setting MPWLGCR[HW_WL_EN] */
	writel(0x00000001, &mmdc0->mpwlgcr);

	/*
	 * 7. Upon completion of this process the MMDC de-asserts
	 * the MPWLGCR[HW_WL_EN]
	 */
	wait_for_bit_le32(&mmdc0->mpwlgcr, 1 << 0, 0, 100, 0);

	/*
	 * 8. check for any errors: check both PHYs for x64 configuration,
	 * if x32, check only PHY0
	 */
	if (readl(&mmdc0->mpwlgcr) & 0x00000F00)
		errors |= 1;
	if (sysinfo->dsize == 2)
		if (readl(&mmdc1->mpwlgcr) & 0x00000F00)
			errors |= 2;

	debug("Ending write leveling calibration. Error mask: 0x%x\n", errors);

	/* check to see if cal failed */
	if ((readl(&mmdc0->mpwldectrl0) == 0x001F001F) &&
	    (readl(&mmdc0->mpwldectrl1) == 0x001F001F) &&
	    ((sysinfo->dsize < 2) ||
	     ((readl(&mmdc1->mpwldectrl0) == 0x001F001F) &&
	      (readl(&mmdc1->mpwldectrl1) == 0x001F001F)))) {
		debug("Cal seems to have soft-failed due to memory not supporting write leveling on all channels. Restoring original write leveling values.\n");
		writel(ldectrl[0], &mmdc0->mpwldectrl0);
		writel(ldectrl[1], &mmdc0->mpwldectrl1);
		if (sysinfo->dsize == 2) {
			writel(ldectrl[2], &mmdc1->mpwldectrl0);
			writel(ldectrl[3], &mmdc1->mpwldectrl1);
		}
		errors |= 4;
	}

	correct_mpwldectr_result(&mmdc0->mpwldectrl0);
	correct_mpwldectr_result(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		correct_mpwldectr_result(&mmdc1->mpwldectrl0);
		correct_mpwldectr_result(&mmdc1->mpwldectrl1);
	}

	/*
	 * User should issue MRS command to exit write leveling mode
	 * through Load Mode Register command
	 * Register setting:
	 * Bits[31:16] MR1 value "ddr_mr1" value from initialization
	 * Bit[9] clear WL_EN to disable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
	writel((ddr_mr1 << 16) + 0x8031, &mmdc0->mdscr);
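	/*
	 * With the default ddr_mr1 = 0x4 above this writes 0x00048031:
	 * CON_REQ (bit 15) | CMD = 0x3 | CMD_BA = 0x1, with WL_EN (bit 9)
	 * now cleared.
	 */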

	/* re-enable auto refresh and zq cal */
	writel(esdmisc_val, &mmdc0->mdref);
	writel(zq_val, &mmdc0->mpzqhwctrl);

	debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08x\n",
	      readl(&mmdc0->mpwldectrl0));
	debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08x\n",
	      readl(&mmdc0->mpwldectrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08x\n",
		      readl(&mmdc1->mpwldectrl0));
		debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08x\n",
		      readl(&mmdc1->mpwldectrl1));
	}

	/* We must force a readback of these values, to get them to stick */
	readl(&mmdc0->mpwldectrl0);
	readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		readl(&mmdc1->mpwldectrl0);
		readl(&mmdc1->mpwldectrl1);
	}

	/* enable DDR logic power down timer: */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer: */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Clear CON_REQ */
	writel(0, &mmdc0->mdscr);

	return errors;
}

static void mmdc_set_sdqs(bool set)
{
	struct mx6dq_iomux_ddr_regs *mx6dq_ddr_iomux =
		(struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
	struct mx6sx_iomux_ddr_regs *mx6sx_ddr_iomux =
		(struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE;
	int i, sdqs_cnt;
	u32 sdqs;

	if (is_mx6sx()) {
		sdqs = (u32)(&mx6sx_ddr_iomux->dram_sdqs0);
		sdqs_cnt = 2;
	} else {	/* MX6DQ */
		sdqs = (u32)(&mx6dq_ddr_iomux->dram_sdqs0);
		sdqs_cnt = 8;
	}

	for (i = 0; i < sdqs_cnt; i++) {
		if (set)
			setbits_le32(sdqs + (4 * i), 0x7000);
		else
			clrbits_le32(sdqs + (4 * i), 0x7000);
	}
}

int mmdc_do_dqs_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	bool cs0_enable;
	bool cs1_enable;
	bool cs0_enable_initial;
	bool cs1_enable_initial;
	u32 esdmisc_val;
	u32 temp_ref;
	u32 pddword = 0x00ffff00; /* best so far, place into MPPDCMPR1 */
	u32 errors = 0;
	u32 initdelay = 0x40404040;

	/* check to see which chip selects are enabled */
	cs0_enable_initial = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable_initial = readl(&mmdc0->mdctl) & 0x40000000;

	/* disable DDR logic power down timer: */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer: */
	setbits_le32(&mmdc0->mapsr, 0x1);

	/* set DQS pull ups */
	mmdc_set_sdqs(true);

	/* Save old RALAT and WALAT values */
	esdmisc_val = readl(&mmdc0->mdmisc);

	setbits_le32(&mmdc0->mdmisc,
		     (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17));

	/* Disable auto refresh before proceeding with calibration */
	temp_ref = readl(&mmdc0->mdref);
	writel(0x0000c000, &mmdc0->mdref);

	/*
	 * Per the ref manual, issue one refresh cycle MDSCR[CMD] = 0x2,
	 * this also sets the CON_REQ bit.
	 */
	if (cs0_enable_initial)
		writel(0x00008020, &mmdc0->mdscr);
	if (cs1_enable_initial)
		writel(0x00008028, &mmdc0->mdscr);

	/* poll to make sure the con_ack bit was asserted */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);

	/*
	 * Check MDMISC register CALIB_PER_CS to see which CS calibration
	 * is targeted to (under normal cases, it should be cleared
	 * as this is the default value, indicating calibration is directed
	 * to CS0).
	 * Disable the other chip select not being targeted for calibration
	 * to avoid any potential issues. This will get re-enabled at the end
	 * of calibration.
	 */
	if ((readl(&mmdc0->mdmisc) & 0x00100000) == 0)
		clrbits_le32(&mmdc0->mdctl, 1 << 30);	/* clear SDE_1 */
	else
		clrbits_le32(&mmdc0->mdctl, 1 << 31);	/* clear SDE_0 */

	/*
	 * Check to see which chip selects are now enabled for
	 * the remainder of the calibration.
	 */
	cs0_enable = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable = readl(&mmdc0->mdctl) & 0x40000000;

	precharge_all(cs0_enable, cs1_enable);

	/* Write the pre-defined value into MPPDCMPR1 */
	writel(pddword, &mmdc0->mppdcmpr1);

	/*
	 * Issue a write access to the external DDR device by setting
	 * the bit SW_DUMMY_WR (bit 0) in the MPSWDAR0 and then poll
	 * this bit until it clears to indicate completion of the write access.
	 */
	setbits_le32(&mmdc0->mpswdar0, 1);
	wait_for_bit_le32(&mmdc0->mpswdar0, 1 << 0, 0, 100, 0);

	/* Set the RD_DL_ABS# bits to their default values
	 * (will be calibrated later in the read delay-line calibration).
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mprddlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mprddlctl);

	/* Force a measurement, for the previous delay setup to take effect. */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * ***************************
	 * Read DQS Gating calibration
	 * ***************************
	 */
	debug("Starting Read DQS Gating calibration.\n");

	/*
	 * Reset the read data FIFOs (two resets); only need to issue reset
	 * to PHY0 since in x64 mode, the reset will also go to PHY1.
	 */
	reset_read_data_fifos();

	/*
	 * Start the automatic read DQS gating calibration process by
	 * asserting MPDGCTRL0[HW_DG_EN] and MPDGCTRL0[DG_CMP_CYC]
	 * and then poll MPDGCTRL0[HW_DG_EN] until this bit clears
	 * to indicate completion.
	 * Also, ensure that MPDGCTRL0[HW_DG_ERR] is clear to indicate
	 * no errors were seen during calibration.
	 */

	/*
	 * Set bit 30: chooses option to wait 32 cycles instead of
	 * 16 before comparing read data.
	 */
	setbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/* Set bit 28 to start automatic read DQS gating calibration */
	setbits_le32(&mmdc0->mpdgctrl0, 5 << 28);
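	/* 5 << 28 sets bit 28 (HW_DG_EN) and keeps bit 30 (the 32-cycle compare option) set. */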

	/* Poll for completion. MPDGCTRL0[HW_DG_EN] should be 0 */
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 28, 0, 100, 0);

	/*
	 * Check to see if any errors were encountered during calibration
	 * (check MPDGCTRL0[HW_DG_ERR]).
	 * Check both PHYs for x64 configuration, if x32, check only PHY0.
	 */
	if (readl(&mmdc0->mpdgctrl0) & 0x00001000)
		errors |= 1;

	if ((sysinfo->dsize == 0x2) && (readl(&mmdc1->mpdgctrl0) & 0x00001000))
		errors |= 2;

	/* now disable mpdgctrl0[DG_CMP_CYC] */
	clrbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		clrbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/*
	 * DQS gating absolute offset should be modified from
	 * reflecting (HW_DG_LOWx + HW_DG_UPx)/2 to
	 * reflecting (HW_DG_UPx - 0x80)
	 */
	modify_dg_result(&mmdc0->mpdghwst0, &mmdc0->mpdghwst1,
			 &mmdc0->mpdgctrl0);
	modify_dg_result(&mmdc0->mpdghwst2, &mmdc0->mpdghwst3,
			 &mmdc0->mpdgctrl1);
	if (sysinfo->dsize == 0x2) {
		modify_dg_result(&mmdc1->mpdghwst0, &mmdc1->mpdghwst1,
				 &mmdc1->mpdgctrl0);
		modify_dg_result(&mmdc1->mpdghwst2, &mmdc1->mpdghwst3,
				 &mmdc1->mpdgctrl1);
	}
	debug("Ending Read DQS Gating calibration. Error mask: 0x%x\n", errors);

	/*
	 * **********************
	 * Read Delay calibration
	 * **********************
	 */
	debug("Starting Read Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 9. Read delay-line calibration
	 * Start the automatic read calibration process by asserting
	 * MPRDDLHWCTL[HW_RD_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mprddlhwctl);

	/*
	 * 10. poll for completion
	 * MMDC indicates that the read data calibration has finished by
	 * setting MPRDDLHWCTL[HW_RD_DL_EN] = 0. Also, ensure that
	 * no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mprddlhwctl, 1 << 4, 0, 100, 0);

	/* check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mprddlhwctl) & 0x0000000f)
		errors |= 4;

	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mprddlhwctl) & 0x0000000f))
		errors |= 8;

	debug("Ending Read Delay calibration. Error mask: 0x%x\n", errors);

	/*
	 * ***********************
	 * Write Delay Calibration
	 * ***********************
	 */
	debug("Starting Write Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 8. Set the WR_DL_ABS# bits to their default values.
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mpwrdlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mpwrdlctl);

	/*
	 * XXX This isn't in the manual. Force a measurement,
	 * for the previous delay setup to take effect.
	 */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * 9. 10. Start the automatic write calibration process
	 * by asserting MPWRDLHWCTL0[HW_WR_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mpwrdlhwctl);

	/*
	 * Poll for completion.
	 * MMDC indicates that the write data calibration has finished
	 * by setting MPWRDLHWCTL[HW_WR_DL_EN] = 0.
	 * Also, ensure that no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mpwrdlhwctl, 1 << 4, 0, 100, 0);

	/* Check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mpwrdlhwctl) & 0x0000000f)
		errors |= 16;

	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mpwrdlhwctl) & 0x0000000f))
		errors |= 32;

	debug("Ending Write Delay calibration. Error mask: 0x%x\n", errors);

	reset_read_data_fifos();

	/* Enable DDR logic power down timer */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Restore MDMISC value (RALAT, WALAT) to MMDCP1 */
	writel(esdmisc_val, &mmdc0->mdmisc);

	/* Clear DQS pull ups */
	mmdc_set_sdqs(false);

	/* Re-enable SDE (chip selects) if they were set initially */
	if (cs1_enable_initial)
		/* Set SDE_1 */
		setbits_le32(&mmdc0->mdctl, 1 << 30);

	if (cs0_enable_initial)
		/* Set SDE_0 */
		setbits_le32(&mmdc0->mdctl, 1 << 31);

	/* Re-enable auto refresh */
	writel(temp_ref, &mmdc0->mdref);

	/* Clear the MDSCR (including the con_req bit) */
	writel(0x0, &mmdc0->mdscr);	/* CS0 */

	/* Poll to make sure the con_ack bit is clear */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 0, 100, 0);

	/*
	 * Print out the registers that were updated as a result
	 * of the calibration process.
	 */
	debug("MMDC registers updated from calibration\n");
	debug("Read DQS gating calibration:\n");
	debug("\tMPDGCTRL0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdgctrl0));
	debug("\tMPDGCTRL1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdgctrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGCTRL0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdgctrl0));
		debug("\tMPDGCTRL1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdgctrl1));
	}
	debug("Read calibration:\n");
	debug("\tMPRDDLCTL PHY0 = 0x%08x\n", readl(&mmdc0->mprddlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPRDDLCTL PHY1 = 0x%08x\n", readl(&mmdc1->mprddlctl));
	debug("Write calibration:\n");
	debug("\tMPWRDLCTL PHY0 = 0x%08x\n", readl(&mmdc0->mpwrdlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPWRDLCTL PHY1 = 0x%08x\n", readl(&mmdc1->mpwrdlctl));

	/*
	 * Registers below are for debugging purposes. These print out
	 * the upper and lower boundaries captured during
	 * read DQS gating calibration.
	 */
	debug("Status registers bounds for read DQS gating:\n");
	debug("\tMPDGHWST0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst0));
	debug("\tMPDGHWST1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst1));
	debug("\tMPDGHWST2 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst2));
	debug("\tMPDGHWST3 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst3));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGHWST0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst0));
		debug("\tMPDGHWST1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst1));
		debug("\tMPDGHWST2 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst2));
		debug("\tMPDGHWST3 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst3));
	}

	debug("Final do_dqs_calibration error mask: 0x%x\n", errors);

	return errors;
}
#endif

#if defined(CONFIG_MX6SX)
/* Configure MX6SX mmdc iomux */
void mx6sx_dram_iocfg(unsigned width,
		      const struct mx6sx_iomux_ddr_regs *ddr,
		      const struct mx6sx_iomux_grp_regs *grp)
{
	struct mx6sx_iomux_ddr_regs *mx6_ddr_iomux;
	struct mx6sx_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sx_iomux_grp_regs *)MX6SX_IOM_GRP_BASE;

	/* DDR IO TYPE */
	writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
	writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);

	/* CLOCK */
	writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);

	/* ADDRESS */
	writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
	writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
	writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);

	/* Control */
	writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
	writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
	writel(ddr->dram_sdcke0, &mx6_ddr_iomux->dram_sdcke0);
	writel(ddr->dram_sdcke1, &mx6_ddr_iomux->dram_sdcke1);
	writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
	writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
	writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);

	/* Data Strobes */
	writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
	writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
	writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);
	if (width >= 32) {
		writel(ddr->dram_sdqs2, &mx6_ddr_iomux->dram_sdqs2);
		writel(ddr->dram_sdqs3, &mx6_ddr_iomux->dram_sdqs3);
	}

	/* Data */
	writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
	writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
	writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
	if (width >= 32) {
		writel(grp->grp_b2ds, &mx6_grp_iomux->grp_b2ds);
		writel(grp->grp_b3ds, &mx6_grp_iomux->grp_b3ds);
	}
	writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
	writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
	if (width >= 32) {
		writel(ddr->dram_dqm2, &mx6_ddr_iomux->dram_dqm2);
		writel(ddr->dram_dqm3, &mx6_ddr_iomux->dram_dqm3);
	}
}
#endif

#if defined(CONFIG_MX6UL) || defined(CONFIG_MX6ULL)
void mx6ul_dram_iocfg(unsigned width,
		      const struct mx6ul_iomux_ddr_regs *ddr,
		      const struct mx6ul_iomux_grp_regs *grp)
{
	struct mx6ul_iomux_ddr_regs *mx6_ddr_iomux;
	struct mx6ul_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6ul_iomux_ddr_regs *)MX6UL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6ul_iomux_grp_regs *)MX6UL_IOM_GRP_BASE;

	/* DDR IO TYPE */
	writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
	writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);

	/* CLOCK */
	writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);

	/* ADDRESS */
	writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
	writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
	writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);

	/* Control */
	writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
	writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
	writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
	writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
	writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);

	/* Data Strobes */
	writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
	writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
	writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);

	/* Data */
	writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
	writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
	writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
	writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
	writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
}
#endif

#if defined(CONFIG_MX6SL)
void mx6sl_dram_iocfg(unsigned width,
		      const struct mx6sl_iomux_ddr_regs *ddr,
		      const struct mx6sl_iomux_grp_regs *grp)
{
	struct mx6sl_iomux_ddr_regs *mx6_ddr_iomux;
	struct mx6sl_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sl_iomux_ddr_regs *)MX6SL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sl_iomux_grp_regs *)MX6SL_IOM_GRP_BASE;

	/* DDR IO TYPE */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* CLOCK */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;

	/* ADDRESS */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}

	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
}
#endif

#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D)
/* Configure MX6DQ mmdc iomux */
void mx6dq_dram_iocfg(unsigned width,
		      const struct mx6dq_iomux_ddr_regs *ddr,
		      const struct mx6dq_iomux_grp_regs *grp)
{
	volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux;
	volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE;

	/* DDR IO Type */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* Clock */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
	mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;

	/* Address */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
	mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
	mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
		mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
		mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
		mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}
	if (width >= 64) {
		mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
		mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
		mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
		mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
	}
	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
		mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
		mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
		mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
	}
}
#endif

#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S)
/* Configure MX6SDL mmdc iomux */
void mx6sdl_dram_iocfg(unsigned width,
		       const struct mx6sdl_iomux_ddr_regs *ddr,
		       const struct mx6sdl_iomux_grp_regs *grp)
{
	volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux;
	volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE;

	/* DDR IO Type */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* Clock */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
	mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;

	/* Address */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
	mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
	mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
		mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
		mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
		mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}
	if (width >= 64) {
		mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
		mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
		mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
		mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
	}
	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
		mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
		mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
		mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
	}
}
#endif

/*
 * Configure mx6 mmdc registers based on:
 *  - board-specific memory configuration
 *  - board-specific calibration data
 *  - ddr3/lpddr2 chip details
 *
 * The various calculations here are derived from the Freescale
 * 1. i.MX6DQSDL DDR3 Script Aid spreadsheet (DOC-94917) designed to generate
 *    MMDC configuration registers based on memory system and memory chip
 *    parameters.
 *
 * 2. i.MX6SL LPDDR2 Script Aid spreadsheet V0.04 designed to generate MMDC
 *    configuration registers based on memory system and memory chip
 *    parameters.
 *
 * The defaults here are those which were specified in the spreadsheet.
 * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM
 * and/or IMX6SLRM section titled MMDC initialization.
 */
#define MR(val, ba, cmd, cs1) \
	((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba)
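/*
 * Example: MR(0, 3, 3, 0) = (1 << 15) | (3 << 4) | 3 = 0x8033, i.e. a
 * CON_REQ plus load-mode-register command that writes 0 to MR3 on CS0,
 * as used for the DDR3 MR3 write in mx6_ddr3_cfg() below.
 */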
#define MMDC1(entry, value) do { \
	if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) \
		mmdc1->entry = value; \
	} while (0)

/*
 * According to JESD209-2B-LPDDR2: Table 103
 * WL: write latency
 */
static int lpddr2_wl(uint32_t mem_speed)
{
	switch (mem_speed) {
	case 1066:
	case 933:
		return 4;
	case 800:
		return 3;
	case 677:
	case 533:
		return 2;
	case 400:
	case 333:
		return 1;
	default:
		puts("invalid memory speed\n");
		hang();
	}

	return 0;
}

/*
 * According to JESD209-2B-LPDDR2: Table 103
 * RL: read latency
 */
static int lpddr2_rl(uint32_t mem_speed)
{
	switch (mem_speed) {
	case 1066:
		return 8;
	case 933:
		return 7;
	case 800:
		return 6;
	case 677:
		return 5;
	case 533:
		return 4;
	case 400:
	case 333:
		return 3;
	default:
		puts("invalid memory speed\n");
		hang();
	}

	return 0;
}

void mx6_lpddr2_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		    const struct mx6_mmdc_calibration *calib,
		    const struct mx6_lpddr2_cfg *lpddr2_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	u32 val;
	u8 tcke, tcksrx, tcksre, trrd;
	u8 twl, txp, tfaw, tcl;
	u16 tras, twr, tmrd, trtp, twtr, trfc, txsr;
	u16 trcd_lp, trppb_lp, trpab_lp, trc_lp;
	u16 cs0_end;
	u8 coladdr;
	int clkper; /* clock period in picoseconds */
	int clock;  /* clock freq in MHz */
	int cs;

	/* only support 16/32 bits */
	if (sysinfo->dsize > 1)
		hang();

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	clock = mxc_get_clock(MXC_DDR_CLK) / 1000000U;
	clkper = (1000 * 1000) / clock; /* pico seconds */
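	/*
	 * Example (arithmetic only): a 400 MHz MMDC clock gives
	 * clkper = 2500 ps, so the 130 ns tRFC used below for 1-4 Gb
	 * densities becomes DIV_ROUND_UP(130000, 2500) - 1 = 51 clocks.
	 */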

	twl = lpddr2_wl(lpddr2_cfg->mem_speed) - 1;

	/* LPDDR2-S2 and LPDDR2-S4 have the same tRFC value. */
	switch (lpddr2_cfg->density) {
	case 1:
	case 2:
	case 4:
		trfc = DIV_ROUND_UP(130000, clkper) - 1;
		txsr = DIV_ROUND_UP(140000, clkper) - 1;
		break;
	case 8:
		trfc = DIV_ROUND_UP(210000, clkper) - 1;
		txsr = DIV_ROUND_UP(220000, clkper) - 1;
		break;
	default:
		/*
		 * 64Mb, 128Mb, 256Mb, 512Mb are not supported currently.
		 */
		hang();
		break;
	}
	/*
	 * txpdll, txpr, taonpd and taofpd are not relevant in LPDDR2 mode,
	 * set them to 0.
	 */
	txp = DIV_ROUND_UP(7500, clkper) - 1;
	tcke = 3;
	if (lpddr2_cfg->mem_speed == 333)
		tfaw = DIV_ROUND_UP(60000, clkper) - 1;
	else
		tfaw = DIV_ROUND_UP(50000, clkper) - 1;
	trrd = DIV_ROUND_UP(10000, clkper) - 1;

	/* tckesr for LPDDR2 */
	tcksre = DIV_ROUND_UP(15000, clkper);
	tcksrx = tcksre;
	twr = DIV_ROUND_UP(15000, clkper) - 1;
	/*
	 * tMRR: 2, tMRW: 5
	 * tMRD should be set to max(tMRR, tMRW)
	 */
	tmrd = 5;
	tras = DIV_ROUND_UP(lpddr2_cfg->trasmin, clkper / 10) - 1;
	/* LPDDR2 mode uses the tRCD_LP field in MDCFG3. */
	trcd_lp = DIV_ROUND_UP(lpddr2_cfg->trcd_lp, clkper / 10) - 1;
	trc_lp = DIV_ROUND_UP(lpddr2_cfg->trasmin + lpddr2_cfg->trppb_lp,
			      clkper / 10) - 1;
	trppb_lp = DIV_ROUND_UP(lpddr2_cfg->trppb_lp, clkper / 10) - 1;
	trpab_lp = DIV_ROUND_UP(lpddr2_cfg->trpab_lp, clkper / 10) - 1;
	/* For LPDDR2, CL in MDCFG0 refers to RL */
	tcl = lpddr2_rl(lpddr2_cfg->mem_speed) - 3;
	twtr = DIV_ROUND_UP(7500, clkper) - 1;
	trtp = DIV_ROUND_UP(7500, clkper) - 1;

	cs0_end = 4 * sysinfo->cs_density - 1;

	debug("density:%d Gb (%d Gb per chip)\n",
	      sysinfo->cs_density, lpddr2_cfg->density);
	debug("clock: %dMHz (%d ps)\n", clock, clkper);
	debug("memspd:%d\n", lpddr2_cfg->mem_speed);
	debug("trcd_lp=%d\n", trcd_lp);
	debug("trppb_lp=%d\n", trppb_lp);
	debug("trpab_lp=%d\n", trpab_lp);
	debug("trc_lp=%d\n", trc_lp);
	debug("tcke=%d\n", tcke);
	debug("tcksrx=%d\n", tcksrx);
	debug("tcksre=%d\n", tcksre);
	debug("trfc=%d\n", trfc);
	debug("txsr=%d\n", txsr);
	debug("txp=%d\n", txp);
	debug("tfaw=%d\n", tfaw);
	debug("tcl=%d\n", tcl);
	debug("tras=%d\n", tras);
	debug("twr=%d\n", twr);
	debug("tmrd=%d\n", tmrd);
	debug("twl=%d\n", twl);
	debug("trtp=%d\n", trtp);
	debug("twtr=%d\n", twtr);
	debug("trrd=%d\n", trrd);
	debug("cs0_end=%d\n", cs0_end);
	debug("ncs=%d\n", sysinfo->ncs);

	/*
	 * board-specific configuration:
	 * These values are determined empirically and vary per board layout
	 */
	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
	mmdc0->mprddlctl = calib->p0_mprddlctl;
	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
	mmdc0->mpzqlp2ctl = calib->mpzqlp2ctl;

	/* Read data DQ Byte0-3 delay */
	mmdc0->mprddqby0dl = 0x33333333;
	mmdc0->mprddqby1dl = 0x33333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mprddqby2dl = 0x33333333;
		mmdc0->mprddqby3dl = 0x33333333;
	}

	/* Write data DQ Byte0-3 delay */
	mmdc0->mpwrdqby0dl = 0xf3333333;
	mmdc0->mpwrdqby1dl = 0xf3333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mpwrdqby2dl = 0xf3333333;
		mmdc0->mpwrdqby3dl = 0xf3333333;
	}

	/*
	 * In LPDDR2 mode this register should be cleared,
	 * so no termination will be activated.
	 */
	mmdc0->mpodtctrl = 0;

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txsr << 16) | (txp << 13) |
			(tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (tras << 16) | (twr << 9) | (tmrd << 5) | twl;
	mmdc0->mdcfg2 = (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdcfg3lp = (trc_lp << 16) | (trcd_lp << 8) |
			  (trppb_lp << 4) | trpab_lp;
	mmdc0->mdotc = 0;

	mmdc0->mdasp = cs0_end; /* CS addressing */

	/* Step 3: Configure DDR type */
	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
			(sysinfo->ralat << 6) | (1 << 3);

	/* Step 4: Configure delay while leaving reset */
	mmdc0->mdor = (sysinfo->sde_to_rst << 8) |
		      (sysinfo->rst_to_cke << 0);

	/* Step 5: Configure DDR physical parameters (density and burst len) */
	coladdr = lpddr2_cfg->coladdr;
	if (lpddr2_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
		coladdr += 4;
	else if (lpddr2_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
		coladdr += 1;
	mmdc0->mdctl = (lpddr2_cfg->rowaddr - 11) << 24 |	/* ROW */
		       (coladdr - 9) << 20 |			/* COL */
		       (0 << 19) |	/* Burst Length = 4 for LPDDR2 */
		       (sysinfo->dsize << 16);	/* DDR data bus size */

	/* Step 6: Perform ZQ calibration */
	val = 0xa1390003; /* one-time HW ZQ calib */
	mmdc0->mpzqhwctrl = val;

	/* Step 7: Enable MMDC with desired chip select */
	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */

	/* Step 8: Write Mode Registers to Init LPDDR2 devices */
	for (cs = 0; cs < sysinfo->ncs; cs++) {
		/* MR63: reset */
		mmdc0->mdscr = MR(63, 0, 3, cs);
		/*
		 * MR10: calibration,
		 * 0xff is the calibration command after initialization.
		 */
		val = 0xA | (0xff << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR1 */
		val = 0x1 | (0x82 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR2 */
		val = 0x2 | (0x04 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR3 */
		val = 0x3 | (0x02 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
	}

	/* Step 10: Power down control and self-refresh */
	mmdc0->mdpdc = (tcke & 0x7) << 16 |
			5 << 12 |	/* PWDT_1: 256 cycles */
			5 << 8 |	/* PWDT_0: 256 cycles */
			1 << 6 |	/* BOTH_CS_PD */
			(tcksrx & 0x7) << 3 |
			(tcksre & 0x7);
	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */

	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
	val = 0xa1310003;
	mmdc0->mpzqhwctrl = val;

	/* Step 12: Configure and activate periodic refresh */
	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);

	/* Step 13: Deassert config request - init complete */
	mmdc0->mdscr = 0x00000000;

	/* wait for auto-ZQ calibration to complete */
	mdelay(1);
}

void mx6_ddr3_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		  const struct mx6_mmdc_calibration *calib,
		  const struct mx6_ddr3_cfg *ddr3_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	volatile struct mmdc_p_regs *mmdc1;
	u32 val;
	u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd;
	u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl;
	u8 todt_idle_off = 0x4; /* from DDR3 Script Aid spreadsheet */
	u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr;
	u16 cs0_end;
	u16 tdllk = 0x1ff; /* DLL locking time: 512 cycles (JEDEC DDR3) */
	u8 coladdr;
	int clkper; /* clock period in picoseconds */
	int clock; /* clock freq in MHz */
	int cs;
	u16 mem_speed = ddr3_cfg->mem_speed;

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl())
		mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;

	/* Limit mem_speed for MX6D/MX6Q */
	if (is_mx6dq() || is_mx6dqp()) {
		if (mem_speed > 1066)
			mem_speed = 1066; /* 1066 MT/s */

		tcwl = 4;
	}
	/* Limit mem_speed for MX6S/MX6DL */
	else {
		if (mem_speed > 800)
			mem_speed = 800;  /* 800 MT/s */

		tcwl = 3;
	}

	clock = mem_speed / 2;
	/*
	 * A data rate of 1066 MT/s requires a 533 MHz DDR3 clock, but MX6D/Q
	 * supports up to 528 MHz, so reduce the clock to fit chip specs
	 */
	if (is_mx6dq() || is_mx6dqp()) {
		if (clock > 528)
			clock = 528; /* 528 MHz */
	}

	clkper = (1000 * 1000) / clock; /* pico seconds */
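	/*
	 * Example (arithmetic only): DDR3-800 gives clock = 400 MHz and
	 * clkper = 2500 ps, so a 2 Gb die below gets
	 * trfc = DIV_ROUND_UP(160000, 2500) - 1 = 63.
	 */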
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001279 todtlon = tcwl;
1280 taxpd = tcwl;
1281 tanpd = tcwl;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001282
Nikita Kiryanov33689182014-09-07 18:58:11 +03001283 switch (ddr3_cfg->density) {
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001284 case 1: /* 1Gb per chip */
1285 trfc = DIV_ROUND_UP(110000, clkper) - 1;
1286 txs = DIV_ROUND_UP(120000, clkper) - 1;
1287 break;
1288 case 2: /* 2Gb per chip */
1289 trfc = DIV_ROUND_UP(160000, clkper) - 1;
1290 txs = DIV_ROUND_UP(170000, clkper) - 1;
1291 break;
1292 case 4: /* 4Gb per chip */
Peng Fan0eca9f62015-09-01 11:03:14 +08001293 trfc = DIV_ROUND_UP(260000, clkper) - 1;
1294 txs = DIV_ROUND_UP(270000, clkper) - 1;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001295 break;
1296 case 8: /* 8Gb per chip */
1297 trfc = DIV_ROUND_UP(350000, clkper) - 1;
1298 txs = DIV_ROUND_UP(360000, clkper) - 1;
1299 break;
1300 default:
1301 /* invalid density */
Nikita Kiryanov33689182014-09-07 18:58:11 +03001302 puts("invalid chip density\n");
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001303 hang();
1304 break;
1305 }
1306 txpr = txs;
1307
Nikolay Dimitrov8a2bd212015-04-22 18:37:31 +03001308 switch (mem_speed) {
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001309 case 800:
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001310 txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
1311 tcke = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
Nikita Kiryanov33689182014-09-07 18:58:11 +03001312 if (ddr3_cfg->pagesz == 1) {
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001313 tfaw = DIV_ROUND_UP(40000, clkper) - 1;
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001314 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001315 } else {
1316 tfaw = DIV_ROUND_UP(50000, clkper) - 1;
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001317 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001318 }
1319 break;
1320 case 1066:
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001321 txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
1322 tcke = DIV_ROUND_UP(max(3 * clkper, 5625), clkper) - 1;
Nikita Kiryanov33689182014-09-07 18:58:11 +03001323 if (ddr3_cfg->pagesz == 1) {
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001324 tfaw = DIV_ROUND_UP(37500, clkper) - 1;
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001325 trrd = DIV_ROUND_UP(max(4 * clkper, 7500), clkper) - 1;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001326 } else {
1327 tfaw = DIV_ROUND_UP(50000, clkper) - 1;
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001328 trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001329 }
1330 break;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001331 default:
Nikita Kiryanov33689182014-09-07 18:58:11 +03001332 puts("invalid memory speed\n");
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001333 hang();
1334 break;
1335 }
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001336 txpdll = DIV_ROUND_UP(max(10 * clkper, 24000), clkper) - 1;
1337 tcksre = DIV_ROUND_UP(max(5 * clkper, 10000), clkper);
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001338 taonpd = DIV_ROUND_UP(2000, clkper) - 1;
Nikita Kiryanov33689182014-09-07 18:58:11 +03001339 tcksrx = tcksre;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001340 taofpd = taonpd;
Nikita Kiryanov33689182014-09-07 18:58:11 +03001341 twr = DIV_ROUND_UP(15000, clkper) - 1;
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001342 tmrd = DIV_ROUND_UP(max(12 * clkper, 15000), clkper) - 1;
Nikita Kiryanov33689182014-09-07 18:58:11 +03001343 trc = DIV_ROUND_UP(ddr3_cfg->trcmin, clkper / 10) - 1;
1344 tras = DIV_ROUND_UP(ddr3_cfg->trasmin, clkper / 10) - 1;
1345 tcl = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 3;
1346 trp = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 1;
Masahiro Yamadac79cba32014-09-18 13:28:06 +09001347 twtr = ROUND(max(4 * clkper, 7500) / clkper, 1) - 1;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001348 trcd = trp;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001349 trtp = twtr;
Nikita Kiryanov07ee9272014-08-20 15:08:58 +03001350 cs0_end = 4 * sysinfo->cs_density - 1;
Nikita Kiryanov33689182014-09-07 18:58:11 +03001351
1352 debug("density:%d Gb (%d Gb per chip)\n",
1353 sysinfo->cs_density, ddr3_cfg->density);
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001354 debug("clock: %dMHz (%d ps)\n", clock, clkper);
Nikolay Dimitrov8a2bd212015-04-22 18:37:31 +03001355 debug("memspd:%d\n", mem_speed);
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001356 debug("tcke=%d\n", tcke);
1357 debug("tcksrx=%d\n", tcksrx);
1358 debug("tcksre=%d\n", tcksre);
1359 debug("taofpd=%d\n", taofpd);
1360 debug("taonpd=%d\n", taonpd);
1361 debug("todtlon=%d\n", todtlon);
1362 debug("tanpd=%d\n", tanpd);
1363 debug("taxpd=%d\n", taxpd);
1364 debug("trfc=%d\n", trfc);
1365 debug("txs=%d\n", txs);
1366 debug("txp=%d\n", txp);
1367 debug("txpdll=%d\n", txpdll);
1368 debug("tfaw=%d\n", tfaw);
1369 debug("tcl=%d\n", tcl);
1370 debug("trcd=%d\n", trcd);
1371 debug("trp=%d\n", trp);
1372 debug("trc=%d\n", trc);
1373 debug("tras=%d\n", tras);
1374 debug("twr=%d\n", twr);
1375 debug("tmrd=%d\n", tmrd);
1376 debug("tcwl=%d\n", tcwl);
1377 debug("tdllk=%d\n", tdllk);
1378 debug("trtp=%d\n", trtp);
1379 debug("twtr=%d\n", twtr);
1380 debug("trrd=%d\n", trrd);
1381 debug("txpr=%d\n", txpr);
Nikita Kiryanov33689182014-09-07 18:58:11 +03001382 debug("cs0_end=%d\n", cs0_end);
1383 debug("ncs=%d\n", sysinfo->ncs);
1384 debug("Rtt_wr=%d\n", sysinfo->rtt_wr);
1385 debug("Rtt_nom=%d\n", sysinfo->rtt_nom);
1386 debug("SRT=%d\n", ddr3_cfg->SRT);
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001387 debug("twr=%d\n", twr);
1388
1389 /*
1390 * board-specific configuration:
1391 * These values are determined empirically and vary per board layout
1392 * see:
1393 * appnote, ddr3 spreadsheet
1394 */
Nikita Kiryanov33689182014-09-07 18:58:11 +03001395 mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
1396 mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
1397 mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
1398 mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
1399 mmdc0->mprddlctl = calib->p0_mprddlctl;
1400 mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
1401 if (sysinfo->dsize > 1) {
Peng Fand9efd472014-12-30 17:24:01 +08001402 MMDC1(mpwldectrl0, calib->p1_mpwldectrl0);
1403 MMDC1(mpwldectrl1, calib->p1_mpwldectrl1);
1404 MMDC1(mpdgctrl0, calib->p1_mpdgctrl0);
1405 MMDC1(mpdgctrl1, calib->p1_mpdgctrl1);
1406 MMDC1(mprddlctl, calib->p1_mprddlctl);
1407 MMDC1(mpwrdlctl, calib->p1_mpwrdlctl);
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001408 }
1409
1410 /* Read data DQ Byte0-3 delay */
Nikita Kiryanov33689182014-09-07 18:58:11 +03001411 mmdc0->mprddqby0dl = 0x33333333;
1412 mmdc0->mprddqby1dl = 0x33333333;
1413 if (sysinfo->dsize > 0) {
1414 mmdc0->mprddqby2dl = 0x33333333;
1415 mmdc0->mprddqby3dl = 0x33333333;
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001416 }
Nikita Kiryanov33689182014-09-07 18:58:11 +03001417
1418 if (sysinfo->dsize > 1) {
Peng Fand9efd472014-12-30 17:24:01 +08001419 MMDC1(mprddqby0dl, 0x33333333);
1420 MMDC1(mprddqby1dl, 0x33333333);
1421 MMDC1(mprddqby2dl, 0x33333333);
1422 MMDC1(mprddqby3dl, 0x33333333);
Tim Harveyfe0f7f72014-06-02 16:13:23 -07001423 }
1424
1425 /* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */
Nikita Kiryanov33689182014-09-07 18:58:11 +03001426 val = (sysinfo->rtt_nom == 2) ? 0x00011117 : 0x00022227;
1427 mmdc0->mpodtctrl = val;
1428 if (sysinfo->dsize > 1)
Peng Fand9efd472014-12-30 17:24:01 +08001429 MMDC1(mpodtctrl, val);

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpmur0, val);

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txs << 16) | (txp << 13) |
			(txpdll << 9) | (tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (trcd << 29) | (trp << 26) | (trc << 21) |
			(tras << 16) | (1 << 15) /* trpa */ |
			(twr << 9) | (tmrd << 5) | tcwl;
	mmdc0->mdcfg2 = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdotc = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) |
		       (taxpd << 16) | (todtlon << 12) | (todt_idle_off << 4);
	mmdc0->mdasp = cs0_end; /* CS addressing */

	/* Step 3: Configure DDR type */
	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
			(sysinfo->ralat << 6);

	/* Step 4: Configure delay while leaving reset */
	mmdc0->mdor = (txpr << 16) | (sysinfo->sde_to_rst << 8) |
		      (sysinfo->rst_to_cke << 0);

	/* Step 5: Configure DDR physical parameters (density and burst len) */
	coladdr = ddr3_cfg->coladdr;
	if (ddr3_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
		coladdr += 4;
	else if (ddr3_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
		coladdr += 1;
	mmdc0->mdctl =  (ddr3_cfg->rowaddr - 11) << 24 |	/* ROW */
			(coladdr - 9) << 20 |			/* COL */
			(1 << 19) |		/* Burst Length = 8 for DDR3 */
			(sysinfo->dsize << 16);		/* DDR data bus size */
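
	/*
	 * Illustrative example (values not from this file): rowaddr = 14 and
	 * coladdr = 10 with a 64-bit bus (dsize = 2) give
	 * (14 - 11) << 24 | (10 - 9) << 20 | (1 << 19) | (2 << 16)
	 * = 0x031a0000; step 7 below then ORs in the SDE enable bits.
	 */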

	/* Step 6: Perform ZQ calibration */
	val = 0xa1390001; /* one-time HW ZQ calib */
	mmdc0->mpzqhwctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpzqhwctrl, val);

	/* Step 7: Enable MMDC with desired chip select */
	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */

	/* Step 8: Write Mode Registers to Init DDR3 devices */
	for (cs = 0; cs < sysinfo->ncs; cs++) {
		/* MR2 */
		val = (sysinfo->rtt_wr & 3) << 9 | (ddr3_cfg->SRT & 1) << 7 |
		      ((tcwl - 3) & 3) << 3;
		debug("MR2 CS%d: 0x%08x\n", cs, (u32)MR(val, 2, 3, cs));
		mmdc0->mdscr = MR(val, 2, 3, cs);
		/* MR3 */
		debug("MR3 CS%d: 0x%08x\n", cs, (u32)MR(0, 3, 3, cs));
		mmdc0->mdscr = MR(0, 3, 3, cs);
		/* MR1 */
		val = ((sysinfo->rtt_nom & 1) ? 1 : 0) << 2 |
		      ((sysinfo->rtt_nom & 2) ? 1 : 0) << 6;
		debug("MR1 CS%d: 0x%08x\n", cs, (u32)MR(val, 1, 3, cs));
		mmdc0->mdscr = MR(val, 1, 3, cs);
		/* MR0 */
		val = ((tcl - 1) << 4) |	/* CAS */
		      (1 << 8) |		/* DLL Reset */
		      ((twr - 3) << 9) |	/* Write Recovery */
		      (sysinfo->pd_fast_exit << 12); /* Precharge PD PLL on */
		debug("MR0 CS%d: 0x%08x\n", cs, (u32)MR(val, 0, 3, cs));
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* ZQ calibration */
		val = (1 << 10);
		mmdc0->mdscr = MR(val, 0, 4, cs);
	}
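
	/*
	 * Illustrative example (values not from this file): with rtt_wr = 1,
	 * SRT = 0 and tcwl = 6, the MR2 payload computed above is
	 * (1 << 9) | ((6 - 3) << 3) = 0x218; the MR() helper then packs the
	 * payload together with the MRS command and chip select into MDSCR
	 * format, which is what the debug() lines print.
	 */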

	/* Step 10: Power down control and self-refresh */
	mmdc0->mdpdc = (tcke & 0x7) << 16 |
			5 << 12 |	/* PWDT_1: 256 cycles */
			5 << 8 |	/* PWDT_0: 256 cycles */
			1 << 6 |	/* BOTH_CS_PD */
			(tcksrx & 0x7) << 3 |
			(tcksre & 0x7);
	if (!sysinfo->pd_fast_exit)
		mmdc0->mdpdc |= (1 << 7); /* SLOW_PD */
	mmdc0->mapsr = 0x00001006; /* Automatic power saving (MAPSR) enabled */

	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
	val = 0xa1390003;
	mmdc0->mpzqhwctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpzqhwctrl, val);

	/* Step 12: Configure and activate periodic refresh */
	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);
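
	/*
	 * In MDREF, refsel picks the refresh request source (e.g. the 64 kHz
	 * or 32 kHz counter) and refr the number of refresh commands issued
	 * per request; both come straight from the board's sysinfo (field
	 * meaning per the MMDC register description, assumed here).
	 */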

	/* Step 13: Deassert config request - init complete */
	mmdc0->mdscr = 0x00000000;

	/* wait for auto-ZQ calibration to complete */
	mdelay(1);
}

void mmdc_read_calibration(struct mx6_ddr_sysinfo const *sysinfo,
			   struct mx6_mmdc_calibration *calib)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;

	calib->p0_mpwldectrl0 = readl(&mmdc0->mpwldectrl0);
	calib->p0_mpwldectrl1 = readl(&mmdc0->mpwldectrl1);
	calib->p0_mpdgctrl0 = readl(&mmdc0->mpdgctrl0);
	calib->p0_mpdgctrl1 = readl(&mmdc0->mpdgctrl1);
	calib->p0_mprddlctl = readl(&mmdc0->mprddlctl);
	calib->p0_mpwrdlctl = readl(&mmdc0->mpwrdlctl);

	if (sysinfo->dsize == 2) {
		calib->p1_mpwldectrl0 = readl(&mmdc1->mpwldectrl0);
		calib->p1_mpwldectrl1 = readl(&mmdc1->mpwldectrl1);
		calib->p1_mpdgctrl0 = readl(&mmdc1->mpdgctrl0);
		calib->p1_mpdgctrl1 = readl(&mmdc1->mpdgctrl1);
		calib->p1_mprddlctl = readl(&mmdc1->mprddlctl);
		calib->p1_mpwrdlctl = readl(&mmdc1->mpwrdlctl);
	}
}
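
/*
 * Illustrative use (not part of this driver): after running the dynamic
 * calibration code in the CONFIG_MX6_DDRCAL block earlier in this file, a
 * board can capture the results for later hard-coding, e.g.:
 *
 *	struct mx6_mmdc_calibration calib;
 *
 *	mmdc_read_calibration(&sysinfo, &calib);
 *	printf("p0_mpdgctrl0 = 0x%08x\n", calib.p0_mpdgctrl0);
 */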

void mx6_dram_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		  const struct mx6_mmdc_calibration *calib,
		  const void *ddr_cfg)
{
	if (sysinfo->ddr_type == DDR_TYPE_DDR3) {
		mx6_ddr3_cfg(sysinfo, calib, ddr_cfg);
	} else if (sysinfo->ddr_type == DDR_TYPE_LPDDR2) {
		mx6_lpddr2_cfg(sysinfo, calib, ddr_cfg);
	} else {
		puts("Unsupported DDR type\n");
		hang();
	}
}
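
/*
 * Usage sketch (illustrative; field values are board-specific and the
 * structures carry more members than the ones referenced above): a board
 * SPL normally provides filled-in mx6_ddr_sysinfo, mx6_mmdc_calibration
 * and DDR3 chip tables and simply calls
 *
 *	mx6_dram_cfg(&board_sysinfo, &board_calib, &board_ddr3_cfg);
 *
 * with board_sysinfo.ddr_type set to DDR_TYPE_DDR3, or DDR_TYPE_LPDDR2
 * together with the corresponding LPDDR2 chip structure.
 */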