// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Intel Corporation <www.intel.com>
 *
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <div64.h>
#include <fdtdec.h>
#include <hang.h>
#include <log.h>
#include <ram.h>
#include <reset.h>
#include <asm/global_data.h>
#include "sdram_soc64.h"
#include <wait_bit.h>
#include <asm/arch/firewall.h>
#include <asm/arch/reset_manager.h>
#include <asm/arch/system_manager.h>
#include <asm/io.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

int sdram_mmr_init_full(struct udevice *dev)
{
	struct altera_sdram_plat *plat = dev_get_plat(dev);
	struct altera_sdram_priv *priv = dev_get_priv(dev);
	u32 i;
	int ret;
	phys_size_t hw_size;
	struct bd_info bd = {0};

	/* Ensure HMC clock is running */
	if (poll_hmc_clock_status()) {
		debug("DDR: Error as HMC clock was not running\n");
		return -EPERM;
	}

	/* Try calibration up to 3 times, resetting the EMIF after a failure */
	for (i = 0; i < 3; i++) {
		ret = wait_for_bit_le32((const void *)(plat->hmc +
					DDRCALSTAT),
					DDR_HMC_DDRCALSTAT_CAL_MSK, true, 1000,
					false);
		if (!ret)
			break;

		emif_reset(plat);
	}

	if (ret) {
		puts("DDR: Error as SDRAM calibration failed\n");
		return -EPERM;
	}
	debug("DDR: Calibration success\n");

	/*
	 * Configure the DDR IO size
	 * niosreserve0: Used to indicate DDR width &
	 *	bit[7:0] = Number of data bits (bit[6:5] 0x01=32bit, 0x10=64bit)
	 *	bit[8]   = 1 if user-mode OCT is present
	 *	bit[9]   = 1 if warm reset compiled into EMIF Cal Code
	 *	bit[10]  = 1 if warm reset is on during generation in EMIF Cal
	 * niosreserve1: IP ADCDS version encoded as 16 bit value
	 *	bit[2:0] = Variant (0=not special,1=FAE beta, 2=Customer beta,
	 *		   3=EAP, 4-6 are reserved)
	 *	bit[5:3] = Service Pack # (e.g. 1)
	 *	bit[9:6] = Minor Release #
	 *	bit[14:10] = Major Release #
	 */
	/* Configure DDR IO size x16, x32 and x64 mode */
	u32 update_value;

	update_value = hmc_readl(plat, NIOSRESERVED0);
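	/* Keep bits [7:5] of niosreserve0 (the DDR width encoding above) */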
	update_value = (update_value & 0xFF) >> 5;

	/* Configure DDR data rate: 0 = half-rate, 1 = quarter-rate */
	update_value |= (hmc_readl(plat, CTRLCFG3) & 0x4);
	hmc_ecc_writel(plat, update_value, DDRIOCTRL);

	/* Copy the MMR IOHMC dramaddrw value to the HMC adapter DRAMADDRWIDTH */
	hmc_ecc_writel(plat, hmc_readl(plat, DRAMADDRW), DRAMADDRWIDTH);

	/* Calculate the SDRAM size from the controller configuration */
	phys_size_t size = sdram_calculate_size(plat);

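	/* Fall back to the board default size if the controller reports none */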
	if (size <= 0)
		hw_size = PHYS_SDRAM_1_SIZE;
	else
		hw_size = size;

	/* Get bank configuration from devicetree */
	ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL,
				     (phys_size_t *)&gd->ram_size, &bd);
	if (ret) {
		puts("DDR: Failed to decode memory node\n");
		return -ENXIO;
	}

	if (gd->ram_size != hw_size) {
		printf("DDR: Warning: DRAM size from device tree (%lld MiB)\n",
		       gd->ram_size >> 20);
		printf(" mismatch with hardware (%lld MiB).\n",
		       hw_size >> 20);
	}

	if (gd->ram_size > hw_size) {
		printf("DDR: Error: DRAM size from device tree is greater\n");
		printf(" than hardware size.\n");
		hang();
	}

	printf("DDR: %lld MiB\n", gd->ram_size >> 20);

	/* Enable non-secure access to DDR */
	/* mpuregion0addr_limit */
	FW_MPU_DDR_SCR_WRITEL(gd->ram_size - 1,
			      FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT);
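	/* LIMITEXT holds the upper bits of the region 0 limit address */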
	FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT);

	/* nonmpuregion0addr_limit */
	FW_MPU_DDR_SCR_WRITEL(gd->ram_size - 1,
			      FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT);

	/* Enable mpuregion0enable and nonmpuregion0enable */
	FW_MPU_DDR_SCR_WRITEL(MPUREGION0_ENABLE | NONMPUREGION0_ENABLE,
			      FW_MPU_DDR_SCR_EN_SET);

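	/* CTRLCFG1 indicates whether ECC was enabled when the HMC was configured */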
	u32 ctrlcfg1 = hmc_readl(plat, CTRLCFG1);

	/* Enable or disable the DDR ECC */
	if (CTRLCFG1_CFG_CTRL_EN_ECC(ctrlcfg1)) {
		setbits_le32(plat->hmc + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_ECC_EN_SET_MSK));
		clrbits_le32(plat->hmc + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK));
		setbits_le32(plat->hmc + ECCCTRL2,
			     (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK |
			      DDR_HMC_ECCCTL2_AWB_EN_SET_MSK));
		setbits_le32(plat->hmc + ERRINTEN,
			     DDR_HMC_ERRINTEN_DERRINTEN_EN_SET_MSK);

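		/*
		 * Only scrub memory to seed the ECC bits on a cold boot;
		 * a warm reset must preserve existing DRAM contents.
		 */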
		if (!cpu_has_been_warmreset())
			sdram_init_ecc_bits(&bd);
	} else {
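		/* ECC not enabled in the HMC configuration: keep it fully disabled */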
		clrbits_le32(plat->hmc + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_ECC_EN_SET_MSK));
		clrbits_le32(plat->hmc + ECCCTRL2,
			     (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK |
			      DDR_HMC_ECCCTL2_AWB_EN_SET_MSK));
	}

	/* Enable non-secure reads/writes to HMC Adapter for SDRAM ECC */
	writel(FW_HMC_ADAPTOR_MPU_MASK, FW_HMC_ADAPTOR_REG_ADDR);

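	/* Sanity check the memory banks described in bd */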
	sdram_size_check(&bd);

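	/* Publish the RAM base and size through the RAM uclass info */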
	priv->info.base = bd.bi_dram[0].start;
	priv->info.size = gd->ram_size;

	debug("DDR: HMC init success\n");
	return 0;
}