Tom Rini | 83d290c | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2014 Panasonic Corporation |
| 4 | * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> |
| 5 | * Copyright (C) 2009-2010, Intel Corporation and its suppliers. |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 6 | */ |
| 7 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 8 | #include <dm.h> |
Simon Glass | 336d461 | 2020-02-03 07:36:16 -0700 | [diff] [blame] | 9 | #include <malloc.h> |
Masahiro Yamada | 0faef2e | 2017-11-30 13:45:27 +0900 | [diff] [blame] | 10 | #include <nand.h> |
Simon Glass | 336d461 | 2020-02-03 07:36:16 -0700 | [diff] [blame] | 11 | #include <dm/device_compat.h> |
Simon Glass | 61b29b8 | 2020-02-03 07:36:15 -0700 | [diff] [blame] | 12 | #include <dm/devres.h> |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 13 | #include <linux/bitfield.h> |
Simon Glass | c3dc39a | 2020-05-10 11:39:55 -0600 | [diff] [blame^] | 14 | #include <linux/delay.h> |
| 15 | #include <linux/dma-direction.h> |
Masahiro Yamada | 9d86b89 | 2020-02-14 16:40:19 +0900 | [diff] [blame] | 16 | #include <linux/dma-mapping.h> |
Simon Glass | 61b29b8 | 2020-02-03 07:36:15 -0700 | [diff] [blame] | 17 | #include <linux/err.h> |
Masahiro Yamada | 1221ce4 | 2016-09-21 11:28:55 +0900 | [diff] [blame] | 18 | #include <linux/errno.h> |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 19 | #include <linux/io.h> |
Masahiro Yamada | 0faef2e | 2017-11-30 13:45:27 +0900 | [diff] [blame] | 20 | #include <linux/mtd/mtd.h> |
| 21 | #include <linux/mtd/rawnand.h> |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 22 | |
| 23 | #include "denali.h" |
| 24 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 25 | #define DENALI_NAND_NAME "denali-nand" |
| 26 | |
| 27 | /* for Indexed Addressing */ |
| 28 | #define DENALI_INDEXED_CTRL 0x00 |
| 29 | #define DENALI_INDEXED_DATA 0x10 |
| 30 | |
| 31 | #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ |
| 32 | #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ |
| 33 | #define DENALI_MAP10 (2 << 26) /* high-level control plane */ |
| 34 | #define DENALI_MAP11 (3 << 26) /* direct controller access */ |
| 35 | |
| 36 | /* MAP11 access cycle type */ |
| 37 | #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ |
| 38 | #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ |
| 39 | #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ |
| 40 | |
| 41 | /* MAP10 commands */ |
| 42 | #define DENALI_ERASE 0x01 |
| 43 | |
| 44 | #define DENALI_BANK(denali) ((denali)->active_bank << 24) |
| 45 | |
| 46 | #define DENALI_INVALID_BANK -1 |
| 47 | #define DENALI_NR_BANKS 4 |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 48 | |
/* Recover the driver-private denali_nand_info embedding this mtd's nand chip. */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 53 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 54 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 55 | * Direct Addressing - the slave address forms the control information (command |
| 56 | * type, bank, block, and page address). The slave data is the actual data to |
| 57 | * be transferred. This mode requires 28 bits of address region allocated. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 58 | */ |
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	/* single MMIO read; @addr already encodes the MAP type and bank */
	return ioread32(denali->host + addr);
}
| 63 | |
static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	/* single MMIO write; @addr already encodes the MAP type and bank */
	iowrite32(data, denali->host + addr);
}
| 69 | |
| 70 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 71 | * Indexed Addressing - address translation module intervenes in passing the |
| 72 | * control information. This mode reduces the required address range. The |
| 73 | * control information and transferred data are latched by the registers in |
| 74 | * the translation module. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 75 | */ |
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	/* latch the control word first, then fetch the translated data */
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}
| 81 | |
static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	/* latch the control word first, then supply the data to be written */
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
| 88 | |
| 89 | /* |
| 90 | * Use the configuration feature register to determine the maximum number of |
| 91 | * banks that the hardware supports. |
| 92 | */ |
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	/* from rev 5.1 onwards, N_BANKS holds log2 of the bank count */
	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}
| 103 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 104 | static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 105 | { |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 106 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 107 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 108 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 109 | iowrite32(U32_MAX, denali->reg + INTR_EN(i)); |
| 110 | iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 111 | } |
| 112 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 113 | static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 114 | { |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 115 | int i; |
| 116 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 117 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 118 | iowrite32(0, denali->reg + INTR_EN(i)); |
| 119 | iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 120 | } |
| 121 | |
/* Acknowledge the given interrupt bits for one bank. */
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
| 128 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 129 | static void denali_clear_irq_all(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 130 | { |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 131 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 132 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 133 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 134 | denali_clear_irq(denali, i, U32_MAX); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 135 | } |
| 136 | |
/*
 * Poll and acknowledge the interrupt status of all banks; events from the
 * currently active bank are accumulated into denali->irq_status so that
 * denali_wait_for_irq() can match against them later.
 */
static void __denali_check_irq(struct denali_nand_info *denali)
{
	uint32_t irq_status;
	int i;

	for (i = 0; i < DENALI_NR_BANKS; i++) {
		irq_status = ioread32(denali->reg + INTR_STATUS(i));
		/* ack first so that no event is lost between polls */
		denali_clear_irq(denali, i, irq_status);

		if (i != denali->active_bank)
			continue;

		denali->irq_status |= irq_status;
	}
}
| 152 | |
/* Discard any accumulated interrupt state before starting a new operation. */
static void denali_reset_irq(struct denali_nand_info *denali)
{
	denali->irq_status = 0;
	denali->irq_mask = 0;
}
| 158 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 159 | static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, |
| 160 | uint32_t irq_mask) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 161 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 162 | unsigned long time_left = 1000000; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 163 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 164 | while (time_left) { |
| 165 | __denali_check_irq(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 166 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 167 | if (irq_mask & denali->irq_status) |
| 168 | return denali->irq_status; |
| 169 | udelay(1); |
| 170 | time_left--; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 171 | } |
| 172 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 173 | if (!time_left) { |
| 174 | dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", |
| 175 | irq_mask); |
| 176 | return 0; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 177 | } |
| 178 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 179 | return denali->irq_status; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 180 | } |
| 181 | |
/* Poll once and return the interrupt bits accumulated for the active bank. */
static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	__denali_check_irq(denali);

	return denali->irq_status;
}
| 188 | |
| 189 | static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
| 190 | { |
| 191 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 192 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 193 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 194 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 195 | for (i = 0; i < len; i++) |
| 196 | buf[i] = denali->host_read(denali, addr); |
| 197 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 198 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 199 | static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
| 200 | { |
| 201 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 202 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 203 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 204 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 205 | for (i = 0; i < len; i++) |
| 206 | denali->host_write(denali, addr, buf[i]); |
| 207 | } |
| 208 | |
| 209 | static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) |
| 210 | { |
| 211 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 212 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 213 | uint16_t *buf16 = (uint16_t *)buf; |
| 214 | int i; |
| 215 | |
| 216 | for (i = 0; i < len / 2; i++) |
| 217 | buf16[i] = denali->host_read(denali, addr); |
| 218 | } |
| 219 | |
| 220 | static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, |
| 221 | int len) |
| 222 | { |
| 223 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 224 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 225 | const uint16_t *buf16 = (const uint16_t *)buf; |
| 226 | int i; |
| 227 | |
| 228 | for (i = 0; i < len / 2; i++) |
| 229 | denali->host_write(denali, addr, buf16[i]); |
| 230 | } |
| 231 | |
/* ->read_byte hook: fetch one byte through a single MAP11 data cycle. */
static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	uint8_t byte;

	denali_read_buf(mtd, &byte, 1);

	return byte;
}
| 240 | |
/* ->write_byte hook: emit one byte through a single MAP11 data cycle. */
static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}
| 245 | |
/* ->read_word hook for 16-bit devices: fetch one word via MAP11. */
static uint16_t denali_read_word(struct mtd_info *mtd)
{
	uint16_t word;

	denali_read_buf16(mtd, (uint8_t *)&word, 2);

	return word;
}
| 254 | |
/*
 * ->cmd_ctrl hook: issue a single command (NAND_CLE) or address (NAND_ALE)
 * cycle via MAP11.  Calls that carry neither flag are ignored.
 */
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
| 276 | |
/* ->dev_ready hook: non-zero once the R/B# (INT_ACT) interrupt has fired. */
static int denali_dev_ready(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	return !!(denali_check_irq(denali) & INTR__INT_ACT);
}
| 283 | |
/*
 * Re-examine each ECC chunk flagged uncorrectable in @uncor_ecc_flags: a
 * chunk may actually be erased (all 0xff) rather than corrupt.  Genuine
 * failures bump mtd->ecc_stats.failed; near-erased chunks are counted as
 * corrected bitflips.  Returns the running max bitflip count, or a
 * negative errno if the OOB ECC bytes cannot be extracted.
 */
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	uint8_t *ecc_code = chip->buffers->ecccode;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, ret, stat;

	/* gather the ECC bytes for the whole page out of the OOB buffer */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; i < ecc_steps; i++) {
		/* only re-check the chunks the ECC engine gave up on */
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						   ecc_code, ecc_bytes,
						   NULL, 0,
						   chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
| 321 | |
/*
 * Collect ECC results when the controller corrects errors in hardware.
 * Returns the maximum per-sector corrected bitflip count; on an
 * uncorrectable error it returns 0 and marks *every* sector in
 * *uncor_ecc_flags, since the hardware does not report which one failed.
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector. We can not know "how many sectors", or
		 * "which sector(s)". We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
| 356 | |
/*
 * Walk the controller's ECC error report registers and fix up single-byte
 * errors directly in @buf.  Sectors flagged uncorrectable are recorded in
 * *uncor_ecc_flags for a later erased-page check.  Returns the maximum
 * per-sector bitflip count, or -EIO if the ECC_TRANSACTION_DONE interrupt
 * never arrives after the last error has been consumed.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	/* each loop iteration consumes one error record from the hardware */
	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, means error
			 * happened in OOB, so we ignore it. It's no need for
			 * us to correct it err_device is represented the NAND
			 * error bits are happened in if there are more than
			 * one NAND connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
| 429 | |
/*
 * Kick a single-page MAP10 DMA transaction on controllers with 64-bit DMA
 * support.  The three host writes below form a fixed command sequence and
 * must stay in this order.
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
| 453 | |
/*
 * Kick a single-page MAP10 DMA transaction on controllers with 32-bit DMA
 * support.  The four host writes below form a fixed command sequence and
 * must stay in this order.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
| 477 | |
/*
 * Read @size bytes of page @page by PIO through the MAP01 window.
 * Returns 0 on success, -EIO if the transfer never completes, or
 * -EBADMSG when the controller reports an ECC error.  A page the
 * controller flags as erased is rewritten as all 0xff.
 */
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	/* the relevant ECC error bit differs between HW and SW fixup modes */
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
| 505 | |
/*
 * Write @size bytes to page @page by PIO through the MAP01 window.
 * Returns 0 on success or -EIO if the program-complete interrupt
 * never arrives (including program failures).
 */
static int denali_pio_write(struct denali_nand_info *denali,
			    const void *buf, size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	const uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status;
	int i;

	denali_reset_irq(denali);

	for (i = 0; i < size / 4; i++)
		denali->host_write(denali, addr, *buf32++);

	irq_status = denali_wait_for_irq(denali,
				INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
	if (!(irq_status & INTR__PROGRAM_COMP))
		return -EIO;

	return 0;
}
| 526 | |
| 527 | static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, |
| 528 | size_t size, int page, int raw, int write) |
| 529 | { |
| 530 | if (write) |
| 531 | return denali_pio_write(denali, buf, size, page, raw); |
| 532 | else |
| 533 | return denali_pio_read(denali, buf, size, page, raw); |
| 534 | } |
| 535 | |
/*
 * Transfer one page by DMA, falling back to PIO if the buffer cannot be
 * DMA-mapped.  Returns 0 on success, -EIO if the DMA-complete interrupt
 * never fires, or -EBADMSG on an ECC error during a read.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface. Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	/* disable DMA again before handing the buffer back */
	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
| 592 | |
/*
 * Transfer one page of data: configure ECC and spare-area access according
 * to @raw, then dispatch to DMA when available, otherwise PIO.
 */
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
			    size_t size, int page, int raw, int write)
{
	/* raw access bypasses ECC but includes the spare (OOB) area */
	iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
	iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
		  denali->reg + TRANSFER_SPARE_REG);

	if (denali->dma_avail)
		return denali_dma_xfer(denali, buf, size, page, raw, write);
	else
		return denali_pio_xfer(denali, buf, size, page, raw, write);
}
| 605 | |
/*
 * Read or write the whole OOB area using raw NAND commands, following the
 * controller's syndrome page layout: the BBM bytes sit at the start of the
 * OOB, each ECC chunk's ECC bytes are interleaved with the payload, and
 * the remaining free OOB bytes come last.  ECC chunks that straddle the
 * writesize boundary are handled by splitting the access in two.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* chunks past writesize are shifted by the BBM skip area */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;
		if (len < ecc_bytes) {
			/* second half of a chunk split by the boundary */
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}
| 665 | |
/*
 * ->read_page_raw hook: fetch main + spare areas with ECC disabled into a
 * temporary buffer, then un-interleave the controller's syndrome layout
 * (payload/ECC chunks interleaved, BBM bytes at the OOB start) into the
 * linear @buf / chip->oob_poi layout the MTD core expects.
 * Returns 0 on success or a negative errno from the transfer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* chunks past writesize are shifted by the BBM skip */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				/* second half of a boundary-split chunk */
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
| 740 | |
/* Read only the OOB area of @page; the main data area is untouched. */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	/* last argument 0 = read direction */
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}
| 748 | |
| 749 | static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, |
| 750 | int page) |
| 751 | { |
| 752 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 753 | int status; |
| 754 | |
| 755 | denali_reset_irq(denali); |
| 756 | |
| 757 | denali_oob_xfer(mtd, chip, page, 1); |
| 758 | |
| 759 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); |
| 760 | status = chip->waitfunc(mtd, chip); |
| 761 | |
| 762 | return status & NAND_STATUS_FAIL ? -EIO : 0; |
| 763 | } |
| 764 | |
| 765 | static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 766 | uint8_t *buf, int oob_required, int page) |
| 767 | { |
| 768 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 769 | unsigned long uncor_ecc_flags = 0; |
| 770 | int stat = 0; |
| 771 | int ret; |
| 772 | |
| 773 | ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); |
| 774 | if (ret && ret != -EBADMSG) |
| 775 | return ret; |
| 776 | |
| 777 | if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) |
| 778 | stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); |
| 779 | else if (ret == -EBADMSG) |
| 780 | stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); |
| 781 | |
| 782 | if (stat < 0) |
| 783 | return stat; |
| 784 | |
| 785 | if (uncor_ecc_flags) { |
| 786 | ret = denali_read_oob(mtd, chip, page); |
| 787 | if (ret) |
| 788 | return ret; |
| 789 | |
| 790 | stat = denali_check_erased_page(mtd, chip, buf, |
| 791 | uncor_ecc_flags, stat); |
| 792 | } |
| 793 | |
| 794 | return stat; |
| 795 | } |
| 796 | |
/*
 * Raw (ECC-bypassed) page write.
 *
 * Inverse of denali_read_page_raw(): interleave the flat payload
 * (@buf) and OOB (chip->oob_poi) into the bounce buffer in the
 * controller's syndrome layout, then transfer the whole page+OOB to
 * the device in raw mode.
 *
 * Returns 0 on success or a negative error code from the transfer.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/*
			 * A chunk starting at or crossing the writesize
			 * boundary is shifted/split around the BBM area.
			 */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder continues after the BBM area */
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				/* remainder continues after the BBM area */
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	/* raw=1 (ECC engine bypassed), write=1 */
	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
| 874 | |
| 875 | static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 876 | const uint8_t *buf, int oob_required, int page) |
| 877 | { |
| 878 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 879 | |
| 880 | return denali_data_xfer(denali, (void *)buf, mtd->writesize, |
| 881 | page, 0, 1); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 882 | } |
| 883 | |
| 884 | static void denali_select_chip(struct mtd_info *mtd, int chip) |
| 885 | { |
| 886 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 887 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 888 | denali->active_bank = chip; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 889 | } |
| 890 | |
| 891 | static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) |
| 892 | { |
| 893 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 894 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 895 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 896 | /* R/B# pin transitioned from low to high? */ |
| 897 | irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 898 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 899 | return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 900 | } |
| 901 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 902 | static int denali_erase(struct mtd_info *mtd, int page) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 903 | { |
| 904 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 905 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 906 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 907 | denali_reset_irq(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 908 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 909 | denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, |
| 910 | DENALI_ERASE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 911 | |
| 912 | /* wait for erase to complete or failure to occur */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 913 | irq_status = denali_wait_for_irq(denali, |
| 914 | INTR__ERASE_COMP | INTR__ERASE_FAIL); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 915 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 916 | return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; |
| 917 | } |
| 918 | |
Masahiro Yamada | 1a7e176 | 2017-11-29 19:18:18 +0900 | [diff] [blame] | 919 | static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 920 | const struct nand_data_interface *conf) |
| 921 | { |
| 922 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 923 | const struct nand_sdr_timings *timings; |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 924 | unsigned long t_x, mult_x; |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 925 | int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; |
| 926 | int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; |
| 927 | int addr_2_data_mask; |
| 928 | uint32_t tmp; |
| 929 | |
| 930 | timings = nand_get_sdr_timings(conf); |
| 931 | if (IS_ERR(timings)) |
| 932 | return PTR_ERR(timings); |
| 933 | |
| 934 | /* clk_x period in picoseconds */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 935 | t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); |
| 936 | if (!t_x) |
| 937 | return -EINVAL; |
| 938 | |
| 939 | /* |
| 940 | * The bus interface clock, clk_x, is phase aligned with the core clock. |
| 941 | * The clk_x is an integral multiple N of the core clk. The value N is |
| 942 | * configured at IP delivery time, and its available value is 4, 5, 6. |
| 943 | */ |
| 944 | mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate); |
| 945 | if (mult_x < 4 || mult_x > 6) |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 946 | return -EINVAL; |
| 947 | |
| 948 | if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) |
| 949 | return 0; |
| 950 | |
| 951 | /* tREA -> ACC_CLKS */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 952 | acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 953 | acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); |
| 954 | |
| 955 | tmp = ioread32(denali->reg + ACC_CLKS); |
| 956 | tmp &= ~ACC_CLKS__VALUE; |
| 957 | tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); |
| 958 | iowrite32(tmp, denali->reg + ACC_CLKS); |
| 959 | |
| 960 | /* tRWH -> RE_2_WE */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 961 | re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 962 | re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); |
| 963 | |
| 964 | tmp = ioread32(denali->reg + RE_2_WE); |
| 965 | tmp &= ~RE_2_WE__VALUE; |
| 966 | tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); |
| 967 | iowrite32(tmp, denali->reg + RE_2_WE); |
| 968 | |
| 969 | /* tRHZ -> RE_2_RE */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 970 | re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 971 | re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); |
| 972 | |
| 973 | tmp = ioread32(denali->reg + RE_2_RE); |
| 974 | tmp &= ~RE_2_RE__VALUE; |
| 975 | tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); |
| 976 | iowrite32(tmp, denali->reg + RE_2_RE); |
| 977 | |
| 978 | /* |
| 979 | * tCCS, tWHR -> WE_2_RE |
| 980 | * |
| 981 | * With WE_2_RE properly set, the Denali controller automatically takes |
| 982 | * care of the delay; the driver need not set NAND_WAIT_TCCS. |
| 983 | */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 984 | we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 985 | we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); |
| 986 | |
| 987 | tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); |
| 988 | tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; |
| 989 | tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); |
| 990 | iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); |
| 991 | |
| 992 | /* tADL -> ADDR_2_DATA */ |
| 993 | |
| 994 | /* for older versions, ADDR_2_DATA is only 6 bit wide */ |
| 995 | addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
| 996 | if (denali->revision < 0x0501) |
| 997 | addr_2_data_mask >>= 1; |
| 998 | |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 999 | addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1000 | addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); |
| 1001 | |
| 1002 | tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); |
| 1003 | tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
| 1004 | tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); |
| 1005 | iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); |
| 1006 | |
| 1007 | /* tREH, tWH -> RDWR_EN_HI_CNT */ |
| 1008 | rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1009 | t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1010 | rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); |
| 1011 | |
| 1012 | tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); |
| 1013 | tmp &= ~RDWR_EN_HI_CNT__VALUE; |
| 1014 | tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); |
| 1015 | iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); |
| 1016 | |
| 1017 | /* tRP, tWP -> RDWR_EN_LO_CNT */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1018 | rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1019 | rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1020 | t_x); |
| 1021 | rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1022 | rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); |
| 1023 | rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); |
| 1024 | |
| 1025 | tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); |
| 1026 | tmp &= ~RDWR_EN_LO_CNT__VALUE; |
| 1027 | tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); |
| 1028 | iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); |
| 1029 | |
| 1030 | /* tCS, tCEA -> CS_SETUP_CNT */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1031 | cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo, |
| 1032 | (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks, |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1033 | 0); |
| 1034 | cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); |
| 1035 | |
| 1036 | tmp = ioread32(denali->reg + CS_SETUP_CNT); |
| 1037 | tmp &= ~CS_SETUP_CNT__VALUE; |
| 1038 | tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); |
| 1039 | iowrite32(tmp, denali->reg + CS_SETUP_CNT); |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1040 | |
| 1041 | return 0; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1042 | } |
| 1043 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1044 | static void denali_reset_banks(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1045 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1046 | u32 irq_status; |
| 1047 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1048 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1049 | for (i = 0; i < denali->max_banks; i++) { |
| 1050 | denali->active_bank = i; |
| 1051 | |
| 1052 | denali_reset_irq(denali); |
| 1053 | |
| 1054 | iowrite32(DEVICE_RESET__BANK(i), |
| 1055 | denali->reg + DEVICE_RESET); |
| 1056 | |
| 1057 | irq_status = denali_wait_for_irq(denali, |
| 1058 | INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); |
| 1059 | if (!(irq_status & INTR__INT_ACT)) |
| 1060 | break; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1061 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1062 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1063 | dev_dbg(denali->dev, "%d chips connected\n", i); |
| 1064 | denali->max_banks = i; |
| 1065 | } |
| 1066 | |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1067 | static void denali_hw_init(struct denali_nand_info *denali) |
| 1068 | { |
| 1069 | /* |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 1070 | * The REVISION register may not be reliable. Platforms are allowed to |
| 1071 | * override it. |
| 1072 | */ |
| 1073 | if (!denali->revision) |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1074 | denali->revision = swab16(ioread32(denali->reg + REVISION)); |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 1075 | |
| 1076 | /* |
Masahiro Yamada | 80924cc | 2020-01-30 00:55:55 +0900 | [diff] [blame] | 1077 | * Set how many bytes should be skipped before writing data in OOB. |
| 1078 | * If a platform requests a non-zero value, set it to the register. |
| 1079 | * Otherwise, read the value out, expecting it has already been set up |
| 1080 | * by firmware. |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1081 | */ |
Masahiro Yamada | 80924cc | 2020-01-30 00:55:55 +0900 | [diff] [blame] | 1082 | if (denali->oob_skip_bytes) |
| 1083 | iowrite32(denali->oob_skip_bytes, |
| 1084 | denali->reg + SPARE_AREA_SKIP_BYTES); |
| 1085 | else |
| 1086 | denali->oob_skip_bytes = ioread32(denali->reg + |
| 1087 | SPARE_AREA_SKIP_BYTES); |
| 1088 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1089 | denali_detect_max_banks(denali); |
| 1090 | iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); |
| 1091 | iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1092 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1093 | iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); |
| 1094 | } |
| 1095 | |
/*
 * Number of bytes of BCH ECC code protecting one ECC step.
 * The Denali controller requires ecc.bytes to be a multiple of 2.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	int ecc_bits = strength * fls(step_size * 8);

	/* round the bit count up to a whole number of 16-bit words */
	return DIV_ROUND_UP(ecc_bits, 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
| 1102 | |
| 1103 | static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, |
| 1104 | struct denali_nand_info *denali) |
| 1105 | { |
| 1106 | int oobavail = mtd->oobsize - denali->oob_skip_bytes; |
| 1107 | int ret; |
| 1108 | |
| 1109 | /* |
| 1110 | * If .size and .strength are already set (usually by DT), |
| 1111 | * check if they are supported by this controller. |
| 1112 | */ |
| 1113 | if (chip->ecc.size && chip->ecc.strength) |
| 1114 | return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); |
| 1115 | |
| 1116 | /* |
| 1117 | * We want .size and .strength closest to the chip's requirement |
| 1118 | * unless NAND_ECC_MAXIMIZE is requested. |
| 1119 | */ |
| 1120 | if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { |
| 1121 | ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); |
| 1122 | if (!ret) |
| 1123 | return 0; |
| 1124 | } |
| 1125 | |
| 1126 | /* Max ECC strength is the last thing we can do */ |
| 1127 | return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1128 | } |
| 1129 | |
/* Legacy ECC layout; only .eccbytes is filled in (see denali_init). */
static struct nand_ecclayout nand_oob;
| 1131 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1132 | static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, |
| 1133 | struct mtd_oob_region *oobregion) |
| 1134 | { |
| 1135 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 1136 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1137 | |
| 1138 | if (section) |
| 1139 | return -ERANGE; |
| 1140 | |
| 1141 | oobregion->offset = denali->oob_skip_bytes; |
| 1142 | oobregion->length = chip->ecc.total; |
| 1143 | |
| 1144 | return 0; |
| 1145 | } |
| 1146 | |
| 1147 | static int denali_ooblayout_free(struct mtd_info *mtd, int section, |
| 1148 | struct mtd_oob_region *oobregion) |
| 1149 | { |
| 1150 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 1151 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1152 | |
| 1153 | if (section) |
| 1154 | return -ERANGE; |
| 1155 | |
| 1156 | oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; |
| 1157 | oobregion->length = mtd->oobsize - oobregion->offset; |
| 1158 | |
| 1159 | return 0; |
| 1160 | } |
| 1161 | |
/* OOB layout callbacks: [BBM/skip bytes][ECC bytes][free bytes]. */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.rfree = denali_ooblayout_free,
};
| 1166 | |
| 1167 | static int denali_multidev_fixup(struct denali_nand_info *denali) |
| 1168 | { |
| 1169 | struct nand_chip *chip = &denali->nand; |
| 1170 | struct mtd_info *mtd = nand_to_mtd(chip); |
| 1171 | |
| 1172 | /* |
| 1173 | * Support for multi device: |
| 1174 | * When the IP configuration is x16 capable and two x8 chips are |
| 1175 | * connected in parallel, DEVICES_CONNECTED should be set to 2. |
| 1176 | * In this case, the core framework knows nothing about this fact, |
| 1177 | * so we should tell it the _logical_ pagesize and anything necessary. |
| 1178 | */ |
| 1179 | denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); |
| 1180 | |
| 1181 | /* |
| 1182 | * On some SoCs, DEVICES_CONNECTED is not auto-detected. |
| 1183 | * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. |
| 1184 | */ |
| 1185 | if (denali->devs_per_cs == 0) { |
| 1186 | denali->devs_per_cs = 1; |
| 1187 | iowrite32(1, denali->reg + DEVICES_CONNECTED); |
| 1188 | } |
| 1189 | |
| 1190 | if (denali->devs_per_cs == 1) |
| 1191 | return 0; |
| 1192 | |
| 1193 | if (denali->devs_per_cs != 2) { |
| 1194 | dev_err(denali->dev, "unsupported number of devices %d\n", |
| 1195 | denali->devs_per_cs); |
| 1196 | return -EINVAL; |
| 1197 | } |
| 1198 | |
| 1199 | /* 2 chips in parallel */ |
| 1200 | mtd->size <<= 1; |
| 1201 | mtd->erasesize <<= 1; |
| 1202 | mtd->writesize <<= 1; |
| 1203 | mtd->oobsize <<= 1; |
| 1204 | chip->chipsize <<= 1; |
| 1205 | chip->page_shift += 1; |
| 1206 | chip->phys_erase_shift += 1; |
| 1207 | chip->bbt_erase_shift += 1; |
| 1208 | chip->chip_shift += 1; |
| 1209 | chip->pagemask <<= 1; |
| 1210 | chip->ecc.size <<= 1; |
| 1211 | chip->ecc.bytes <<= 1; |
| 1212 | chip->ecc.strength <<= 1; |
| 1213 | denali->oob_skip_bytes <<= 1; |
| 1214 | |
| 1215 | return 0; |
| 1216 | } |
| 1217 | |
Masahiro Yamada | 1d9654d | 2017-08-26 01:12:31 +0900 | [diff] [blame] | 1218 | int denali_init(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1219 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1220 | struct nand_chip *chip = &denali->nand; |
| 1221 | struct mtd_info *mtd = nand_to_mtd(chip); |
| 1222 | u32 features = ioread32(denali->reg + FEATURES); |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1223 | int ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1224 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1225 | denali_hw_init(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1226 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1227 | denali_clear_irq_all(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1228 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1229 | denali_reset_banks(denali); |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1230 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1231 | denali->active_bank = DENALI_INVALID_BANK; |
| 1232 | |
| 1233 | chip->flash_node = dev_of_offset(denali->dev); |
| 1234 | /* Fallback to the default name if DT did not give "label" property */ |
| 1235 | if (!mtd->name) |
| 1236 | mtd->name = "denali-nand"; |
| 1237 | |
| 1238 | chip->select_chip = denali_select_chip; |
| 1239 | chip->read_byte = denali_read_byte; |
| 1240 | chip->write_byte = denali_write_byte; |
| 1241 | chip->read_word = denali_read_word; |
| 1242 | chip->cmd_ctrl = denali_cmd_ctrl; |
| 1243 | chip->dev_ready = denali_dev_ready; |
| 1244 | chip->waitfunc = denali_waitfunc; |
| 1245 | |
| 1246 | if (features & FEATURES__INDEX_ADDR) { |
| 1247 | denali->host_read = denali_indexed_read; |
| 1248 | denali->host_write = denali_indexed_write; |
| 1249 | } else { |
| 1250 | denali->host_read = denali_direct_read; |
| 1251 | denali->host_write = denali_direct_write; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1252 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1253 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1254 | /* clk rate info is needed for setup_data_interface */ |
| 1255 | if (denali->clk_x_rate) |
| 1256 | chip->setup_data_interface = denali_setup_data_interface; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1257 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1258 | ret = nand_scan_ident(mtd, denali->max_banks, NULL); |
| 1259 | if (ret) |
| 1260 | return ret; |
| 1261 | |
| 1262 | if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) |
| 1263 | denali->dma_avail = 1; |
| 1264 | |
| 1265 | if (denali->dma_avail) { |
Masahiro Yamada | 4a610fa | 2018-07-19 10:13:23 +0900 | [diff] [blame] | 1266 | chip->buf_align = ARCH_DMA_MINALIGN; |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1267 | if (denali->caps & DENALI_CAP_DMA_64BIT) |
| 1268 | denali->setup_dma = denali_setup_dma64; |
| 1269 | else |
| 1270 | denali->setup_dma = denali_setup_dma32; |
| 1271 | } else { |
| 1272 | chip->buf_align = 4; |
| 1273 | } |
| 1274 | |
| 1275 | chip->options |= NAND_USE_BOUNCE_BUFFER; |
| 1276 | chip->bbt_options |= NAND_BBT_USE_FLASH; |
| 1277 | chip->bbt_options |= NAND_BBT_NO_OOB; |
| 1278 | denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1279 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1280 | /* no subpage writes on denali */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1281 | chip->options |= NAND_NO_SUBPAGE_WRITE; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1282 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1283 | ret = denali_ecc_setup(mtd, chip, denali); |
| 1284 | if (ret) { |
| 1285 | dev_err(denali->dev, "Failed to setup ECC settings.\n"); |
| 1286 | return ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1287 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1288 | |
| 1289 | dev_dbg(denali->dev, |
| 1290 | "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", |
| 1291 | chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); |
| 1292 | |
| 1293 | iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | |
| 1294 | FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), |
| 1295 | denali->reg + ECC_CORRECTION); |
| 1296 | iowrite32(mtd->erasesize / mtd->writesize, |
| 1297 | denali->reg + PAGES_PER_BLOCK); |
| 1298 | iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, |
| 1299 | denali->reg + DEVICE_WIDTH); |
| 1300 | iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, |
| 1301 | denali->reg + TWO_ROW_ADDR_CYCLES); |
| 1302 | iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); |
| 1303 | iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); |
| 1304 | |
| 1305 | iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); |
| 1306 | iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); |
| 1307 | /* chip->ecc.steps is set by nand_scan_tail(); not available here */ |
| 1308 | iowrite32(mtd->writesize / chip->ecc.size, |
| 1309 | denali->reg + CFG_NUM_DATA_BLOCKS); |
| 1310 | |
| 1311 | mtd_set_ooblayout(mtd, &denali_ooblayout_ops); |
| 1312 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1313 | nand_oob.eccbytes = denali->nand.ecc.bytes; |
| 1314 | denali->nand.ecc.layout = &nand_oob; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1315 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1316 | if (chip->options & NAND_BUSWIDTH_16) { |
| 1317 | chip->read_buf = denali_read_buf16; |
| 1318 | chip->write_buf = denali_write_buf16; |
| 1319 | } else { |
| 1320 | chip->read_buf = denali_read_buf; |
| 1321 | chip->write_buf = denali_write_buf; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1322 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1323 | chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; |
| 1324 | chip->ecc.read_page = denali_read_page; |
| 1325 | chip->ecc.read_page_raw = denali_read_page_raw; |
| 1326 | chip->ecc.write_page = denali_write_page; |
| 1327 | chip->ecc.write_page_raw = denali_write_page_raw; |
| 1328 | chip->ecc.read_oob = denali_read_oob; |
| 1329 | chip->ecc.write_oob = denali_write_oob; |
| 1330 | chip->erase = denali_erase; |
| 1331 | |
| 1332 | ret = denali_multidev_fixup(denali); |
| 1333 | if (ret) |
| 1334 | return ret; |
| 1335 | |
| 1336 | /* |
| 1337 | * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not |
| 1338 | * use devm_kmalloc() because the memory allocated by devm_ does not |
| 1339 | * guarantee DMA-safe alignment. |
| 1340 | */ |
| 1341 | denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); |
| 1342 | if (!denali->buf) |
| 1343 | return -ENOMEM; |
| 1344 | |
| 1345 | ret = nand_scan_tail(mtd); |
| 1346 | if (ret) |
| 1347 | goto free_buf; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1348 | |
Scott Wood | ceee07b | 2016-05-30 13:57:58 -0500 | [diff] [blame] | 1349 | ret = nand_register(0, mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1350 | if (ret) { |
| 1351 | dev_err(denali->dev, "Failed to register MTD: %d\n", ret); |
| 1352 | goto free_buf; |
| 1353 | } |
| 1354 | return 0; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1355 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1356 | free_buf: |
| 1357 | kfree(denali->buf); |
| 1358 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1359 | return ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1360 | } |