/*
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * based on commit 21b6e480f92ccc38fe0502e3116411d6509d3bf2 of Diag by:
 * Copyright (C) 2015 Socionext Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <asm/processor.h>

#include "../init.h"
#include "../soc-info.h"
#include "ddrmphy-regs.h"
#include "umc-regs.h"

#define DRAM_CH_NR	3

enum dram_freq {
	DRAM_FREQ_1866M,
	DRAM_FREQ_2133M,
	DRAM_FREQ_NR,
};

enum dram_size {
	DRAM_SZ_256M,
	DRAM_SZ_512M,
	DRAM_SZ_NR,
};

static u32 ddrphy_pgcr2[DRAM_FREQ_NR] = {0x00FC7E5D, 0x00FC90AB};
static u32 ddrphy_ptr0[DRAM_FREQ_NR] = {0x0EA09205, 0x10C0A6C6};
static u32 ddrphy_ptr1[DRAM_FREQ_NR] = {0x0DAC041B, 0x0FA104B1};
static u32 ddrphy_ptr3[DRAM_FREQ_NR] = {0x15171e45, 0x18182357};
static u32 ddrphy_ptr4[DRAM_FREQ_NR] = {0x0e9ad8e9, 0x10b34157};
static u32 ddrphy_dtpr0[DRAM_FREQ_NR] = {0x35a00d88, 0x39e40e88};
static u32 ddrphy_dtpr1[DRAM_FREQ_NR] = {0x2288cc2c, 0x228a04d0};
static u32 ddrphy_dtpr2[DRAM_FREQ_NR] = {0x50005e00, 0x50006a00};
static u32 ddrphy_dtpr3[DRAM_FREQ_NR] = {0x0010cb49, 0x0010ec89};
static u32 ddrphy_mr0[DRAM_FREQ_NR] = {0x00000115, 0x00000125};
static u32 ddrphy_mr2[DRAM_FREQ_NR] = {0x000002a0, 0x000002a8};

/* dependent on package and board design */
static u32 ddrphy_acbdlr0[DRAM_CH_NR] = {0x0000000c, 0x0000000c, 0x00000009};

static u32 umc_cmdctla[DRAM_FREQ_NR] = {0x66DD131D, 0x77EE1722};
/*
 * ch2 uses a different generation of the UMC core.
 * Its register spec is different, unfortunately.
 */
static u32 umc_cmdctlb_ch01[DRAM_FREQ_NR] = {0x13E87C44, 0x18F88C44};
static u32 umc_cmdctlb_ch2[DRAM_FREQ_NR] = {0x19E8DC44, 0x1EF8EC44};
static u32 umc_spcctla[DRAM_FREQ_NR][DRAM_SZ_NR] = {
	{0x004A071D, 0x0078071D},
	{0x0055081E, 0x0089081E},
};

static u32 umc_spcctlb[] = {0x00FF000A, 0x00FF000B};
/* ch2 is different for a reason that only the hardware guys know... */
static u32 umc_flowctla_ch01[] = {0x0800001E, 0x08000022};
static u32 umc_flowctla_ch2[] = {0x0800001E, 0x0800001E};

/* DDR multiPHY */
static inline int ddrphy_get_rank(int dx)
{
	return dx / 2;
}

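/* reset the PHY FIFO pointers by pulsing PGCR0.PHYFRST low and back high */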
static void ddrphy_fifo_reset(void __iomem *phy_base)
{
	u32 tmp;

	tmp = readl(phy_base + DMPHY_PGCR0);
	tmp &= ~DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);

	tmp |= DMPHY_PGCR0_PHYFRST;
	writel(tmp, phy_base + DMPHY_PGCR0);

	udelay(1);
}

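/*
 * Enable/disable VT (voltage/temperature) delay compensation.  When disabling,
 * wait until the PHY reports that VT compensation has actually stopped.
 */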
static void ddrphy_vt_ctrl(void __iomem *phy_base, int enable)
{
	u32 tmp;

	tmp = readl(phy_base + DMPHY_PGCR1);

	if (enable)
		tmp &= ~DMPHY_PGCR1_INHVT;
	else
		tmp |= DMPHY_PGCR1_INHVT;

	writel(tmp, phy_base + DMPHY_PGCR1);

	if (!enable) {
		while (!(readl(phy_base + DMPHY_PGSR1) & DMPHY_PGSR1_VTSTOP))
			cpu_relax();
	}
}

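/*
 * Add "step" to the read DQS delay (the RDQSD field of DX LCDLR1) of every
 * byte lane, with VT compensation inhibited during the update.
 */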
static void ddrphy_dqs_delay_fixup(void __iomem *phy_base, int nr_dx, int step)
{
	int dx;
	u32 lcdlr1, rdqsd;
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;

	ddrphy_vt_ctrl(phy_base, 0);

	for (dx = 0; dx < nr_dx; dx++) {
		lcdlr1 = readl(dx_base + DMPHY_DX_LCDLR1);
		rdqsd = (lcdlr1 >> 8) & 0xff;
		rdqsd = clamp(rdqsd + step, 0U, 0xffU);
		lcdlr1 = (lcdlr1 & ~(0xff << 8)) | (rdqsd << 8);
		writel(lcdlr1, dx_base + DMPHY_DX_LCDLR1);
		readl(dx_base + DMPHY_DX_LCDLR1); /* relax */
		dx_base += DMPHY_DX_STRIDE;
	}

	ddrphy_vt_ctrl(phy_base, 1);
}

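/*
 * Return the maximum DQS gating system latency (DGSL) trained across all
 * byte lanes and ranks.
 */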
static int ddrphy_get_system_latency(void __iomem *phy_base, int width)
{
	void __iomem *dx_base = phy_base + DMPHY_DX_BASE;
	const int nr_dx = width / 8;
	int dx, rank;
	u32 gtr;
	int dgsl, dgsl_min = INT_MAX, dgsl_max = 0;

	for (dx = 0; dx < nr_dx; dx++) {
		gtr = readl(dx_base + DMPHY_DX_GTR);
		for (rank = 0; rank < 4; rank++) {
			dgsl = gtr & 0x7;
			/* if dgsl is zero, this rank was not trained. skip. */
			if (dgsl) {
				dgsl_min = min(dgsl_min, dgsl);
				dgsl_max = max(dgsl_max, dgsl);
			}
			gtr >>= 3;
		}
		dx_base += DMPHY_DX_STRIDE;
	}

	if (dgsl_min != dgsl_max)
		printf("DQS Gating System Latencies are not all leveled.\n");

	return dgsl_max;
}

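/* one-time DDR-PHY register setup, done before calibration, DRAM init, and training */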
static void ddrphy_init(void __iomem *phy_base, enum dram_freq freq, int width,
			int ch)
{
	u32 tmp;
	void __iomem *zq_base, *dx_base;
	int zq, dx;
	int nr_dx;

	nr_dx = width / 8;

	writel(DMPHY_PIR_ZCALBYP, phy_base + DMPHY_PIR);
	/*
	 * Disable the RGLVT bit (Read DQS Gating LCDL Delay VT Compensation)
	 * to avoid read errors.
	 */
	writel(0x07d81e37, phy_base + DMPHY_PGCR0);
	writel(0x0200c4e0, phy_base + DMPHY_PGCR1);

	tmp = ddrphy_pgcr2[freq];
	if (width >= 32)
		tmp |= DMPHY_PGCR2_DUALCHN | DMPHY_PGCR2_ACPDDC;
	writel(tmp, phy_base + DMPHY_PGCR2);

	writel(ddrphy_ptr0[freq], phy_base + DMPHY_PTR0);
	writel(ddrphy_ptr1[freq], phy_base + DMPHY_PTR1);
	writel(0x00083def, phy_base + DMPHY_PTR2);
	writel(ddrphy_ptr3[freq], phy_base + DMPHY_PTR3);
	writel(ddrphy_ptr4[freq], phy_base + DMPHY_PTR4);

	writel(ddrphy_acbdlr0[ch], phy_base + DMPHY_ACBDLR0);

	writel(0x55555555, phy_base + DMPHY_ACIOCR1);
	writel(0x00000000, phy_base + DMPHY_ACIOCR2);
	writel(0x55555555, phy_base + DMPHY_ACIOCR3);
	writel(0x00000000, phy_base + DMPHY_ACIOCR4);
	writel(0x00000055, phy_base + DMPHY_ACIOCR5);
	writel(0x00181aa4, phy_base + DMPHY_DXCCR);

	writel(0x0024641e, phy_base + DMPHY_DSGCR);
	writel(0x0000040b, phy_base + DMPHY_DCR);
	writel(ddrphy_dtpr0[freq], phy_base + DMPHY_DTPR0);
	writel(ddrphy_dtpr1[freq], phy_base + DMPHY_DTPR1);
	writel(ddrphy_dtpr2[freq], phy_base + DMPHY_DTPR2);
	writel(ddrphy_dtpr3[freq], phy_base + DMPHY_DTPR3);
	writel(ddrphy_mr0[freq], phy_base + DMPHY_MR0);
	writel(0x00000006, phy_base + DMPHY_MR1);
	writel(ddrphy_mr2[freq], phy_base + DMPHY_MR2);
	writel(0x00000000, phy_base + DMPHY_MR3);

	tmp = 0;
	for (dx = 0; dx < nr_dx; dx++)
		tmp |= BIT(DMPHY_DTCR_RANKEN_SHIFT + ddrphy_get_rank(dx));
	writel(0x90003087 | tmp, phy_base + DMPHY_DTCR);

	writel(0x00000000, phy_base + DMPHY_DTAR0);
	writel(0x00000008, phy_base + DMPHY_DTAR1);
	writel(0x00000010, phy_base + DMPHY_DTAR2);
	writel(0x00000018, phy_base + DMPHY_DTAR3);
	writel(0xdd22ee11, phy_base + DMPHY_DTDR0);
	writel(0x7788bb44, phy_base + DMPHY_DTDR1);

	/* impedance control settings */
	writel(0x04048900, phy_base + DMPHY_ZQCR);

	zq_base = phy_base + DMPHY_ZQ_BASE;
	for (zq = 0; zq < 4; zq++) {
		/*
		 * board-dependent
		 * PXS2: CH0ZQ0=0x5B, CH1ZQ0=0x5B, CH2ZQ0=0x59, others=0x5D
		 */
		writel(0x0007BB5D, zq_base + DMPHY_ZQ_PR);
		zq_base += DMPHY_ZQ_STRIDE;
	}

	/* DATX8 settings */
	dx_base = phy_base + DMPHY_DX_BASE;
	for (dx = 0; dx < 4; dx++) {
		tmp = readl(dx_base + DMPHY_DX_GCR0);
		tmp &= ~DMPHY_DX_GCR0_WLRKEN_MASK;
		tmp |= BIT(DMPHY_DX_GCR0_WLRKEN_SHIFT + ddrphy_get_rank(dx)) &
			DMPHY_DX_GCR0_WLRKEN_MASK;
		writel(tmp, dx_base + DMPHY_DX_GCR0);

		writel(0x00000000, dx_base + DMPHY_DX_GCR1);
		writel(0x00000000, dx_base + DMPHY_DX_GCR2);
		writel(0x00000000, dx_base + DMPHY_DX_GCR3);
		dx_base += DMPHY_DX_STRIDE;
	}

	while (!(readl(phy_base + DMPHY_PGSR0) & DMPHY_PGSR0_IDONE))
		cpu_relax();

	ddrphy_dqs_delay_fixup(phy_base, nr_dx, -4);
}

struct ddrphy_init_sequence {
	char *description;
	u32 init_flag;
	u32 done_flag;
	u32 err_flag;
};

static const struct ddrphy_init_sequence impedance_calibration_sequence[] = {
	{
		"Impedance Calibration",
		DMPHY_PIR_ZCAL,
		DMPHY_PGSR0_ZCDONE,
		DMPHY_PGSR0_ZCERR,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence dram_init_sequence[] = {
	{
		"DRAM Initialization",
		DMPHY_PIR_DRAMRST | DMPHY_PIR_DRAMINIT,
		DMPHY_PGSR0_DIDONE,
		0,
	},
	{ /* sentinel */ }
};

static const struct ddrphy_init_sequence training_sequence[] = {
	{
		"Write Leveling",
		DMPHY_PIR_WL,
		DMPHY_PGSR0_WLDONE,
		DMPHY_PGSR0_WLERR,
	},
	{
		"Read DQS Gate Training",
		DMPHY_PIR_QSGATE,
		DMPHY_PGSR0_QSGDONE,
		DMPHY_PGSR0_QSGERR,
	},
	{
		"Write Leveling Adjustment",
		DMPHY_PIR_WLADJ,
		DMPHY_PGSR0_WLADONE,
		DMPHY_PGSR0_WLAERR,
	},
	{
		"Read Bit Deskew",
		DMPHY_PIR_RDDSKW,
		DMPHY_PGSR0_RDDONE,
		DMPHY_PGSR0_RDERR,
	},
	{
		"Write Bit Deskew",
		DMPHY_PIR_WRDSKW,
		DMPHY_PGSR0_WDDONE,
		DMPHY_PGSR0_WDERR,
	},
	{
		"Read Eye Training",
		DMPHY_PIR_RDEYE,
		DMPHY_PGSR0_REDONE,
		DMPHY_PGSR0_REERR,
	},
	{
		"Write Eye Training",
		DMPHY_PIR_WREYE,
		DMPHY_PGSR0_WEDONE,
		DMPHY_PGSR0_WEERR,
	},
	{ /* sentinel */ }
};

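/*
 * Kick all steps of "seq" at once via the PIR register, wait for every
 * "done" flag in PGSR0, then check the per-step error flags.
 */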
static int __ddrphy_training(void __iomem *phy_base,
			     const struct ddrphy_init_sequence *seq)
{
	const struct ddrphy_init_sequence *s;
	u32 pgsr0;
	u32 init_flag = DMPHY_PIR_INIT;
	u32 done_flag = DMPHY_PGSR0_IDONE;
	int timeout = 50000;	/* 50 msec is long enough */
#ifdef DISPLAY_ELAPSED_TIME
	ulong start = get_timer(0);
#endif

	for (s = seq; s->description; s++) {
		init_flag |= s->init_flag;
		done_flag |= s->done_flag;
	}

	writel(init_flag, phy_base + DMPHY_PIR);

	do {
		if (--timeout < 0) {
			pr_err("%s: error: timeout during DDR training\n",
			       __func__);
			return -ETIMEDOUT;
		}
		udelay(1);
		pgsr0 = readl(phy_base + DMPHY_PGSR0);
	} while ((pgsr0 & done_flag) != done_flag);

	for (s = seq; s->description; s++) {
		if (pgsr0 & s->err_flag) {
			pr_err("%s: error: %s failed\n", __func__,
			       s->description);
			return -EIO;
		}
	}

#ifdef DISPLAY_ELAPSED_TIME
	printf("%s: info: elapsed time %ld msec\n", __func__,
	       get_timer(start));
#endif

	return 0;
}

static int ddrphy_impedance_calibration(void __iomem *phy_base)
{
	int ret;
	u32 tmp;

	ret = __ddrphy_training(phy_base, impedance_calibration_sequence);
	if (ret)
		return ret;

	/*
	 * Because of a hardware bug, the IDONE flag is set when the first ZQ
	 * block is calibrated.  The flag does not guarantee the completion of
	 * all the ZQ blocks.  Wait a little more just in case.
	 */
	udelay(1);

	/* reflect ZQ settings and enable the averaging algorithm */
	tmp = readl(phy_base + DMPHY_ZQCR);
	tmp |= DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	writel(tmp, phy_base + DMPHY_ZQCR);
	tmp &= ~DMPHY_ZQCR_FORCE_ZCAL_VT_UPDATE;
	tmp |= DMPHY_ZQCR_AVGEN;
	writel(tmp, phy_base + DMPHY_ZQCR);

	return 0;
}

static int ddrphy_dram_init(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, dram_init_sequence);
}

static int ddrphy_training(void __iomem *phy_base)
{
	return __ddrphy_training(phy_base, training_sequence);
}

/* UMC */
static void umc_set_system_latency(void __iomem *umc_dc_base, int phy_latency)
{
	u32 val;
	int latency;

	val = readl(umc_dc_base + UMC_RDATACTL_D0);
	latency = (val & UMC_RDATACTL_RADLTY_MASK) >> UMC_RDATACTL_RADLTY_SHIFT;
	latency += (val & UMC_RDATACTL_RAD2LTY_MASK) >>
			UMC_RDATACTL_RAD2LTY_SHIFT;
	/*
	 * The UMC works at half the clock rate of the PHY.
	 * The LSB of the latency is ignored.
	 */
	latency += phy_latency & ~1;

	val &= ~(UMC_RDATACTL_RADLTY_MASK | UMC_RDATACTL_RAD2LTY_MASK);
	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, umc_dc_base + UMC_RDATACTL_D0);
	writel(val, umc_dc_base + UMC_RDATACTL_D1);

	readl(umc_dc_base + UMC_RDATACTL_D1); /* relax */
}

/* enable/disable auto refresh */
void umc_refresh_ctrl(void __iomem *umc_dc_base, int enable)
{
	u32 tmp;

	tmp = readl(umc_dc_base + UMC_SPCSETB);
	tmp &= ~UMC_SPCSETB_AREFMD_MASK;

	if (enable)
		tmp |= UMC_SPCSETB_AREFMD_ARB;
	else
		tmp |= UMC_SPCSETB_AREFMD_REG;

	writel(tmp, umc_dc_base + UMC_SPCSETB);
	udelay(1);
}

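/* per-channel settings outside the DRAM controller (DC) register region */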
static void umc_ud_init(void __iomem *umc_base, int ch)
{
	writel(0x00000003, umc_base + UMC_BITPERPIXELMODE_D0);

	if (ch == 2)
		writel(0x00000033, umc_base + UMC_PAIR1DOFF_D0);
}

static int umc_dc_init(void __iomem *umc_dc_base, enum dram_freq freq,
		       unsigned long size, int width, int ch)
{
	enum dram_size size_e;
	int latency;
	u32 val;

	switch (size) {
	case 0:
		return 0;
	case SZ_256M:
		size_e = DRAM_SZ_256M;
		break;
	case SZ_512M:
		size_e = DRAM_SZ_512M;
		break;
	default:
		pr_err("unsupported DRAM size 0x%08lx (per 16bit) for ch%d\n",
		       size, ch);
		return -EINVAL;
	}

	writel(umc_cmdctla[freq], umc_dc_base + UMC_CMDCTLA);

	writel(ch == 2 ? umc_cmdctlb_ch2[freq] : umc_cmdctlb_ch01[freq],
	       umc_dc_base + UMC_CMDCTLB);

	writel(umc_spcctla[freq][size_e], umc_dc_base + UMC_SPCCTLA);
	writel(umc_spcctlb[freq], umc_dc_base + UMC_SPCCTLB);

	val = 0x000e000e;
	latency = 12;
	/* ES2 inserted one more FF into the logic. */
	if (uniphier_get_soc_model() >= 2)
		latency += 2;

	if (latency > 0xf) {
		val |= 0xf << UMC_RDATACTL_RADLTY_SHIFT;
		val |= (latency - 0xf) << UMC_RDATACTL_RAD2LTY_SHIFT;
	} else {
		val |= latency << UMC_RDATACTL_RADLTY_SHIFT;
	}

	writel(val, umc_dc_base + UMC_RDATACTL_D0);
	if (width >= 32)
		writel(val, umc_dc_base + UMC_RDATACTL_D1);

	writel(0x04060A02, umc_dc_base + UMC_WDATACTL_D0);
	if (width >= 32)
		writel(0x04060A02, umc_dc_base + UMC_WDATACTL_D1);
	writel(0x04000000, umc_dc_base + UMC_DATASET);
	writel(0x00400020, umc_dc_base + UMC_DCCGCTL);
	writel(0x00000084, umc_dc_base + UMC_FLOWCTLG);
	writel(0x00000000, umc_dc_base + UMC_ACSSETA);

	writel(ch == 2 ? umc_flowctla_ch2[freq] : umc_flowctla_ch01[freq],
	       umc_dc_base + UMC_FLOWCTLA);

	writel(0x00004400, umc_dc_base + UMC_FLOWCTLC);
	writel(0x200A0A00, umc_dc_base + UMC_SPCSETB);
	writel(0x00000520, umc_dc_base + UMC_DFICUPDCTLA);
	writel(0x0000000D, umc_dc_base + UMC_RESPCTL);

	if (ch != 2) {
		writel(0x00202000, umc_dc_base + UMC_FLOWCTLB);
		writel(0xFDBFFFFF, umc_dc_base + UMC_FLOWCTLOB0);
		writel(0xFFFFFFFF, umc_dc_base + UMC_FLOWCTLOB1);
		writel(0x00080700, umc_dc_base + UMC_BSICMAPSET);
	} else {
		writel(0x00200000, umc_dc_base + UMC_FLOWCTLB);
		writel(0x00000000, umc_dc_base + UMC_BSICMAPSET);
	}

	writel(0x00000000, umc_dc_base + UMC_ERRMASKA);
	writel(0x00000000, umc_dc_base + UMC_ERRMASKB);

	return 0;
}

static int umc_ch_init(void __iomem *umc_ch_base, enum dram_freq freq,
		       unsigned long size, unsigned int width, int ch)
{
	void __iomem *umc_dc_base = umc_ch_base + 0x00011000;
	void __iomem *phy_base = umc_ch_base + 0x00030000;
	int ret;

	writel(0x00000002, umc_dc_base + UMC_INITSET);
	while (readl(umc_dc_base + UMC_INITSTAT) & BIT(2))
		cpu_relax();

	/* deassert PHY reset signals */
	writel(UMC_DIOCTLA_CTL_NRST | UMC_DIOCTLA_CFG_NRST,
	       umc_dc_base + UMC_DIOCTLA);

	ddrphy_init(phy_base, freq, width, ch);

	ret = ddrphy_impedance_calibration(phy_base);
	if (ret)
		return ret;

	ret = ddrphy_dram_init(phy_base);
	if (ret)
		return ret;

	ret = umc_dc_init(umc_dc_base, freq, size, width, ch);
	if (ret)
		return ret;

	umc_ud_init(umc_ch_base, ch);

	ret = ddrphy_training(phy_base);
	if (ret)
		return ret;

	udelay(1);

	/* match the system latency between UMC and PHY */
	umc_set_system_latency(umc_dc_base,
			       ddrphy_get_system_latency(phy_base, width));

	udelay(1);

	/* stop auto refresh before clearing FIFO in PHY */
	umc_refresh_ctrl(umc_dc_base, 0);
	ddrphy_fifo_reset(phy_base);
	umc_refresh_ctrl(umc_dc_base, 1);

	udelay(10);

	return 0;
}

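/* settings common to all channels, applied once after every channel is set up */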
static void um_init(void __iomem *um_base)
{
	writel(0x000000ff, um_base + UMC_MBUS0);
	writel(0x000000ff, um_base + UMC_MBUS1);
	writel(0x000000ff, um_base + UMC_MBUS2);
	writel(0x000000ff, um_base + UMC_MBUS3);
}

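/*
 * Entry point: initialize every DRAM channel described in the board data,
 * then apply the common settings.
 */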
int proxstream2_umc_init(const struct uniphier_board_data *bd)
{
	void __iomem *um_base = (void __iomem *)0x5b600000;
	void __iomem *umc_ch_base = (void __iomem *)0x5b800000;
	enum dram_freq freq;
	int ch, ret;

	switch (bd->dram_freq) {
	case 1866:
		freq = DRAM_FREQ_1866M;
		break;
	case 2133:
		freq = DRAM_FREQ_2133M;
		break;
	default:
		pr_err("unsupported DRAM frequency %d MHz\n", bd->dram_freq);
		return -EINVAL;
	}

	for (ch = 0; ch < bd->dram_nr_ch; ch++) {
		unsigned long size = bd->dram_ch[ch].size;
		unsigned int width = bd->dram_ch[ch].width;

		ret = umc_ch_init(umc_ch_base, freq, size / (width / 16),
				  width, ch);
		if (ret) {
			pr_err("failed to initialize UMC ch%d\n", ch);
			return ret;
		}

		umc_ch_base += 0x00200000;
	}

	um_init(um_base);

	return 0;
}