Tom Rini | 83d290c | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0+ |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2014 Panasonic Corporation |
| 4 | * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> |
| 5 | * Copyright (C) 2009-2010, Intel Corporation and its suppliers. |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 6 | */ |
| 7 | |
Simon Glass | 90526e9 | 2020-05-10 11:39:56 -0600 | [diff] [blame] | 8 | #include <common.h> |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 9 | #include <dm.h> |
Simon Glass | 336d461 | 2020-02-03 07:36:16 -0700 | [diff] [blame] | 10 | #include <malloc.h> |
Masahiro Yamada | 0faef2e | 2017-11-30 13:45:27 +0900 | [diff] [blame] | 11 | #include <nand.h> |
Simon Glass | 90526e9 | 2020-05-10 11:39:56 -0600 | [diff] [blame] | 12 | #include <asm/cache.h> |
| 13 | #include <asm/dma-mapping.h> |
Simon Glass | 336d461 | 2020-02-03 07:36:16 -0700 | [diff] [blame] | 14 | #include <dm/device_compat.h> |
Simon Glass | 61b29b8 | 2020-02-03 07:36:15 -0700 | [diff] [blame] | 15 | #include <dm/devres.h> |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 16 | #include <linux/bitfield.h> |
Simon Glass | c3dc39a | 2020-05-10 11:39:55 -0600 | [diff] [blame] | 17 | #include <linux/delay.h> |
| 18 | #include <linux/dma-direction.h> |
Masahiro Yamada | 9d86b89 | 2020-02-14 16:40:19 +0900 | [diff] [blame] | 19 | #include <linux/dma-mapping.h> |
Simon Glass | 61b29b8 | 2020-02-03 07:36:15 -0700 | [diff] [blame] | 20 | #include <linux/err.h> |
Masahiro Yamada | 1221ce4 | 2016-09-21 11:28:55 +0900 | [diff] [blame] | 21 | #include <linux/errno.h> |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 22 | #include <linux/io.h> |
Masahiro Yamada | 0faef2e | 2017-11-30 13:45:27 +0900 | [diff] [blame] | 23 | #include <linux/mtd/mtd.h> |
| 24 | #include <linux/mtd/rawnand.h> |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 25 | |
| 26 | #include "denali.h" |
| 27 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 28 | #define DENALI_NAND_NAME "denali-nand" |
| 29 | |
| 30 | /* for Indexed Addressing */ |
| 31 | #define DENALI_INDEXED_CTRL 0x00 |
| 32 | #define DENALI_INDEXED_DATA 0x10 |
| 33 | |
| 34 | #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ |
| 35 | #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ |
| 36 | #define DENALI_MAP10 (2 << 26) /* high-level control plane */ |
| 37 | #define DENALI_MAP11 (3 << 26) /* direct controller access */ |
| 38 | |
| 39 | /* MAP11 access cycle type */ |
| 40 | #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ |
| 41 | #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ |
| 42 | #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ |
| 43 | |
| 44 | /* MAP10 commands */ |
| 45 | #define DENALI_ERASE 0x01 |
| 46 | |
| 47 | #define DENALI_BANK(denali) ((denali)->active_bank << 24) |
| 48 | |
| 49 | #define DENALI_INVALID_BANK -1 |
| 50 | #define DENALI_NR_BANKS 4 |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 51 | |
Scott Wood | ceee07b | 2016-05-30 13:57:58 -0500 | [diff] [blame] | 52 | static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) |
| 53 | { |
| 54 | return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); |
| 55 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 56 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 57 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 58 | * Direct Addressing - the slave address forms the control information (command |
| 59 | * type, bank, block, and page address). The slave data is the actual data to |
| 60 | * be transferred. This mode requires 28 bits of address region allocated. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 61 | */ |
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	/* the slave address itself carries all of the control information */
	return ioread32(denali->host + addr);
}
| 66 | |
/* Direct-addressing counterpart of denali_direct_read() for writes. */
static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}
| 72 | |
| 73 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 74 | * Indexed Addressing - address translation module intervenes in passing the |
| 75 | * control information. This mode reduces the required address range. The |
| 76 | * control information and transferred data are latched by the registers in |
| 77 | * the translation module. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 78 | */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 79 | static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 80 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 81 | iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); |
| 82 | return ioread32(denali->host + DENALI_INDEXED_DATA); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 83 | } |
| 84 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 85 | static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, |
| 86 | u32 data) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 87 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 88 | iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); |
| 89 | iowrite32(data, denali->host + DENALI_INDEXED_DATA); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 90 | } |
| 91 | |
| 92 | /* |
| 93 | * Use the configuration feature register to determine the maximum number of |
| 94 | * banks that the hardware supports. |
| 95 | */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 96 | static void denali_detect_max_banks(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 97 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 98 | uint32_t features = ioread32(denali->reg + FEATURES); |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 99 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 100 | denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 101 | |
| 102 | /* the encoding changed from rev 5.0 to 5.1 */ |
| 103 | if (denali->revision < 0x0501) |
| 104 | denali->max_banks <<= 1; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 105 | } |
| 106 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 107 | static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 108 | { |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 109 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 110 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 111 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 112 | iowrite32(U32_MAX, denali->reg + INTR_EN(i)); |
| 113 | iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 114 | } |
| 115 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 116 | static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 117 | { |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 118 | int i; |
| 119 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 120 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 121 | iowrite32(0, denali->reg + INTR_EN(i)); |
| 122 | iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 123 | } |
| 124 | |
/* Acknowledge the given interrupt bits on one bank. */
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits (INTR_STATUS is W1C) */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
| 131 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 132 | static void denali_clear_irq_all(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 133 | { |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 134 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 135 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 136 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 137 | denali_clear_irq(denali, i, U32_MAX); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 138 | } |
| 139 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 140 | static void __denali_check_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 141 | { |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 142 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 143 | int i; |
| 144 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 145 | for (i = 0; i < DENALI_NR_BANKS; i++) { |
| 146 | irq_status = ioread32(denali->reg + INTR_STATUS(i)); |
| 147 | denali_clear_irq(denali, i, irq_status); |
| 148 | |
| 149 | if (i != denali->active_bank) |
| 150 | continue; |
| 151 | |
| 152 | denali->irq_status |= irq_status; |
| 153 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 154 | } |
| 155 | |
/* Discard any latched interrupt state before starting a new operation. */
static void denali_reset_irq(struct denali_nand_info *denali)
{
	denali->irq_status = 0;
	denali->irq_mask = 0;
}
| 161 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 162 | static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, |
| 163 | uint32_t irq_mask) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 164 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 165 | unsigned long time_left = 1000000; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 166 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 167 | while (time_left) { |
| 168 | __denali_check_irq(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 169 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 170 | if (irq_mask & denali->irq_status) |
| 171 | return denali->irq_status; |
| 172 | udelay(1); |
| 173 | time_left--; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 174 | } |
| 175 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 176 | if (!time_left) { |
| 177 | dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", |
| 178 | irq_mask); |
| 179 | return 0; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 180 | } |
| 181 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 182 | return denali->irq_status; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 183 | } |
| 184 | |
/* Poll the interrupt status once and return the accumulated bits. */
static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	__denali_check_irq(denali);

	return denali->irq_status;
}
| 191 | |
| 192 | static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
| 193 | { |
| 194 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 195 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 196 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 197 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 198 | for (i = 0; i < len; i++) |
| 199 | buf[i] = denali->host_read(denali, addr); |
| 200 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 201 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 202 | static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
| 203 | { |
| 204 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 205 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 206 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 207 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 208 | for (i = 0; i < len; i++) |
| 209 | denali->host_write(denali, addr, buf[i]); |
| 210 | } |
| 211 | |
| 212 | static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) |
| 213 | { |
| 214 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 215 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 216 | uint16_t *buf16 = (uint16_t *)buf; |
| 217 | int i; |
| 218 | |
| 219 | for (i = 0; i < len / 2; i++) |
| 220 | buf16[i] = denali->host_read(denali, addr); |
| 221 | } |
| 222 | |
| 223 | static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, |
| 224 | int len) |
| 225 | { |
| 226 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 227 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 228 | const uint16_t *buf16 = (const uint16_t *)buf; |
| 229 | int i; |
| 230 | |
| 231 | for (i = 0; i < len / 2; i++) |
| 232 | denali->host_write(denali, addr, buf16[i]); |
| 233 | } |
| 234 | |
| 235 | static uint8_t denali_read_byte(struct mtd_info *mtd) |
| 236 | { |
| 237 | uint8_t byte; |
| 238 | |
| 239 | denali_read_buf(mtd, &byte, 1); |
| 240 | |
| 241 | return byte; |
| 242 | } |
| 243 | |
/* Emit a single byte via one MAP11 data cycle. */
static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}
| 248 | |
| 249 | static uint16_t denali_read_word(struct mtd_info *mtd) |
| 250 | { |
| 251 | uint16_t word; |
| 252 | |
| 253 | denali_read_buf16(mtd, (uint8_t *)&word, 2); |
| 254 | |
| 255 | return word; |
| 256 | } |
| 257 | |
/*
 * Issue one command cycle (NAND_CLE) or one address cycle (NAND_ALE)
 * through the MAP11 interface.  Calls with neither flag set carry no bus
 * activity and are ignored.
 */
static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t type;

	if (ctrl & NAND_CLE)
		type = DENALI_MAP11_CMD;
	else if (ctrl & NAND_ALE)
		type = DENALI_MAP11_ADDR;
	else
		return;

	/*
	 * Some commands are followed by chip->dev_ready or chip->waitfunc.
	 * irq_status must be cleared here to catch the R/B# interrupt later.
	 */
	if (ctrl & NAND_CTRL_CHANGE)
		denali_reset_irq(denali);

	denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
| 279 | |
| 280 | static int denali_dev_ready(struct mtd_info *mtd) |
| 281 | { |
| 282 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 283 | |
| 284 | return !!(denali_check_irq(denali) & INTR__INT_ACT); |
| 285 | } |
| 286 | |
/*
 * Re-examine ECC sectors flagged as uncorrectable in @uncor_ecc_flags:
 * an erased (all-0xff) sector with a blank ECC code is not a real error.
 * Genuine failures bump mtd->ecc_stats.failed; recoverable ones bump
 * .corrected.  Returns the updated maximum bitflip count, or a negative
 * errno if the OOB ECC bytes cannot be extracted.
 */
static int denali_check_erased_page(struct mtd_info *mtd,
				    struct nand_chip *chip, uint8_t *buf,
				    unsigned long uncor_ecc_flags,
				    unsigned int max_bitflips)
{
	uint8_t *ecc_code = chip->buffers->ecccode;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int i, ret, stat;

	/* gather the on-flash ECC bytes out of the OOB buffer */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; i < ecc_steps; i++) {
		/* only re-check the sectors the caller flagged */
		if (!(uncor_ecc_flags & BIT(i)))
			continue;

		stat = nand_check_erased_ecc_chunk(buf, ecc_size,
						  ecc_code, ecc_bytes,
						  NULL, 0,
						  chip->ecc.strength);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}

		buf += ecc_size;
		ecc_code += ecc_bytes;
	}

	return max_bitflips;
}
| 324 | |
/*
 * Collect the result of the hardware ECC engine for the active bank.
 * Returns the maximum per-sector bitflip count, or 0 with every bit of
 * *uncor_ecc_flags set when the engine reported an uncorrectable error
 * (the caller must then run the erased-page check on all sectors).
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector. We can not know "how many sectors", or
		 * "which sector(s)". We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
| 359 | |
/*
 * Software-assisted ECC fixup: drain the controller's error report
 * registers (ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO) until the LAST_ERR
 * flag, patching each reported byte of @buf in place.  Sectors flagged
 * uncorrectable are recorded in *uncor_ecc_flags for a later erased-page
 * check.  Returns the maximum per-sector bitflip count, or -EIO if the
 * ECC_TRANSACTION_DONE interrupt never arrives.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, means error
			 * happened in OOB, so we ignore it. It's no need for
			 * us to correct it err_device is represented the NAND
			 * error bits are happened in if there are more than
			 * one NAND connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
| 432 | |
/*
 * Kick a single-page DMA transfer using the 64-bit descriptor format:
 * three MAP10 writes carrying the transfer setup and the split DMA address.
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;	/* always one page per transfer */

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 * burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
| 456 | |
/*
 * Kick a single-page DMA transfer using the legacy 32-bit descriptor
 * format: a four-write MAP10 handshake (0x2000/0x2200/0x2300/0x2400
 * sub-commands) for controllers without 64-bit DMA support.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;	/* always one page per transfer */

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
| 480 | |
| 481 | static int denali_pio_read(struct denali_nand_info *denali, void *buf, |
| 482 | size_t size, int page, int raw) |
| 483 | { |
| 484 | u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; |
| 485 | uint32_t *buf32 = (uint32_t *)buf; |
| 486 | uint32_t irq_status, ecc_err_mask; |
| 487 | int i; |
| 488 | |
| 489 | if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) |
| 490 | ecc_err_mask = INTR__ECC_UNCOR_ERR; |
| 491 | else |
| 492 | ecc_err_mask = INTR__ECC_ERR; |
| 493 | |
| 494 | denali_reset_irq(denali); |
| 495 | |
| 496 | for (i = 0; i < size / 4; i++) |
| 497 | *buf32++ = denali->host_read(denali, addr); |
| 498 | |
| 499 | irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); |
| 500 | if (!(irq_status & INTR__PAGE_XFER_INC)) |
| 501 | return -EIO; |
| 502 | |
| 503 | if (irq_status & INTR__ERASED_PAGE) |
| 504 | memset(buf, 0xff, size); |
| 505 | |
| 506 | return irq_status & ecc_err_mask ? -EBADMSG : 0; |
| 507 | } |
| 508 | |
| 509 | static int denali_pio_write(struct denali_nand_info *denali, |
| 510 | const void *buf, size_t size, int page, int raw) |
| 511 | { |
| 512 | u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; |
| 513 | const uint32_t *buf32 = (uint32_t *)buf; |
| 514 | uint32_t irq_status; |
| 515 | int i; |
| 516 | |
| 517 | denali_reset_irq(denali); |
| 518 | |
| 519 | for (i = 0; i < size / 4; i++) |
| 520 | denali->host_write(denali, addr, *buf32++); |
| 521 | |
| 522 | irq_status = denali_wait_for_irq(denali, |
| 523 | INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); |
| 524 | if (!(irq_status & INTR__PROGRAM_COMP)) |
| 525 | return -EIO; |
| 526 | |
| 527 | return 0; |
| 528 | } |
| 529 | |
| 530 | static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, |
| 531 | size_t size, int page, int raw, int write) |
| 532 | { |
| 533 | if (write) |
| 534 | return denali_pio_write(denali, buf, size, page, raw); |
| 535 | else |
| 536 | return denali_pio_read(denali, buf, size, page, raw); |
| 537 | } |
| 538 | |
/*
 * Transfer one page via the controller's DMA engine, falling back to PIO
 * when the buffer cannot be DMA-mapped.  Returns 0 on success, -EIO if
 * the DMA command never completes, or -EBADMSG on a read ECC error.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
	/*
	 * The ->setup_dma() hook kicks DMA by using the data/command
	 * interface, which belongs to a different AXI port from the
	 * register interface. Read back the register to avoid a race.
	 */
	ioread32(denali->reg + DMA_ENABLE);

	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(dma_addr, size, dir);

	/* erased pages are reported by the controller; present them as 0xff */
	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
| 595 | |
| 596 | static int denali_data_xfer(struct denali_nand_info *denali, void *buf, |
| 597 | size_t size, int page, int raw, int write) |
| 598 | { |
| 599 | iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); |
| 600 | iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, |
| 601 | denali->reg + TRANSFER_SPARE_REG); |
| 602 | |
| 603 | if (denali->dma_avail) |
| 604 | return denali_dma_xfer(denali, buf, size, page, raw, write); |
| 605 | else |
| 606 | return denali_pio_xfer(denali, buf, size, page, raw, write); |
| 607 | } |
| 608 | |
/*
 * Read or write the OOB bytes of @page.  The controller uses a
 * syndrome-like layout (payload and ECC interleaved, with the first
 * oob_skip_bytes reserved for the bad block marker), so the OOB content
 * is scattered across the page and must be transferred piecewise using
 * RNDIN/RNDOUT column jumps.
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* ECC chunks past writesize shift over the skipped BBM bytes */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;
		/* a chunk straddling writesize continues after the BBM area */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}
| 668 | |
/*
 * Raw page read: transfer the full page+OOB without ECC correction and
 * rearrange the controller's interleaved (syndrome) layout in denali->buf
 * into the flat payload-then-OOB view MTD expects.  Returns 0 on success
 * or a negative errno from the underlying transfer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	/* raw transfer (raw=1, write=0) of the whole page into tmp_buf */
	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* chunks past writesize shift over the BBM bytes */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			/* a straddling chunk continues after the BBM area */
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
| 743 | |
/*
 * Read the whole OOB area of @page into chip->oob_poi.
 *
 * denali_oob_xfer() does the work; a zero direction flag selects a read.
 * Always reports success to the caller.
 */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	const int write = 0;

	denali_oob_xfer(mtd, chip, page, write);

	return 0;
}
| 751 | |
| 752 | static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, |
| 753 | int page) |
| 754 | { |
| 755 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 756 | int status; |
| 757 | |
| 758 | denali_reset_irq(denali); |
| 759 | |
| 760 | denali_oob_xfer(mtd, chip, page, 1); |
| 761 | |
| 762 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); |
| 763 | status = chip->waitfunc(mtd, chip); |
| 764 | |
| 765 | return status & NAND_STATUS_FAIL ? -EIO : 0; |
| 766 | } |
| 767 | |
/*
 * Read one page with hardware ECC enabled.
 *
 * Bitflip fixup is delegated either to the controller (HW_ECC_FIXUP
 * capable IP) or to the software fixup path when the transfer reported
 * -EBADMSG. Sectors flagged as uncorrectable are re-examined by
 * denali_check_erased_page() (with the OOB fetched first) before being
 * reported as real failures.
 *
 * Returns the value propagated from the fixup/erased-page helpers
 * (a bitflip count by MTD convention) or a negative error code.
 */
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned long uncor_ecc_flags = 0;
	int stat = 0;
	int ret;

	/* ECC-enabled (raw=0) read (write=0) of the payload only */
	ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
	if (ret && ret != -EBADMSG)
		return ret;

	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
	else if (ret == -EBADMSG)
		stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);

	if (stat < 0)
		return stat;

	if (uncor_ecc_flags) {
		/*
		 * Fetch the OOB so the erased-page check can inspect the
		 * complete page + spare contents.
		 */
		ret = denali_read_oob(mtd, chip, page);
		if (ret)
			return ret;

		stat = denali_check_erased_page(mtd, chip, buf,
						uncor_ecc_flags, stat);
	}

	return stat;
}
| 799 | |
/*
 * Write one page plus OOB in raw mode (ECC engine bypassed),
 * interleaving the linear payload (@buf) and OOB (chip->oob_poi) views
 * into the controller's on-flash layout before the transfer.
 *
 * On flash, payload and ECC chunks are interleaved:
 *   payload0, ecc0, payload1, ecc1, ...
 * with denali->oob_skip_bytes reserved at the start of the spare area
 * (bad block marker region). Data is assembled in denali->buf and sent
 * in a single raw transfer.
 *
 * Returns the result of the raw (raw=1) write (write=1) transfer.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Unless the caller supplies both payload and OOB, prefill the
	 * buffer with 0xff so the areas not provided are written as
	 * all-ones. This simplifies the copy logic below.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				/* chunk lies wholly in the spare area; skip the BBM bytes */
				pos += oob_skip;
			else if (pos + len > writesize)
				/* chunk straddles the main/spare boundary; split the copy */
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder continues just past the skipped BBM bytes */
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free: whatever is left at the end of the spare area */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
| 877 | |
| 878 | static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 879 | const uint8_t *buf, int oob_required, int page) |
| 880 | { |
| 881 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 882 | |
| 883 | return denali_data_xfer(denali, (void *)buf, mtd->writesize, |
| 884 | page, 0, 1); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 885 | } |
| 886 | |
| 887 | static void denali_select_chip(struct mtd_info *mtd, int chip) |
| 888 | { |
| 889 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 890 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 891 | denali->active_bank = chip; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 892 | } |
| 893 | |
| 894 | static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) |
| 895 | { |
| 896 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 897 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 898 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 899 | /* R/B# pin transitioned from low to high? */ |
| 900 | irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 901 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 902 | return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 903 | } |
| 904 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 905 | static int denali_erase(struct mtd_info *mtd, int page) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 906 | { |
| 907 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 908 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 909 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 910 | denali_reset_irq(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 911 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 912 | denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, |
| 913 | DENALI_ERASE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 914 | |
| 915 | /* wait for erase to complete or failure to occur */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 916 | irq_status = denali_wait_for_irq(denali, |
| 917 | INTR__ERASE_COMP | INTR__ERASE_FAIL); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 918 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 919 | return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; |
| 920 | } |
| 921 | |
Masahiro Yamada | 1a7e176 | 2017-11-29 19:18:18 +0900 | [diff] [blame] | 922 | static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 923 | const struct nand_data_interface *conf) |
| 924 | { |
| 925 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 926 | const struct nand_sdr_timings *timings; |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 927 | unsigned long t_x, mult_x; |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 928 | int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; |
| 929 | int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; |
| 930 | int addr_2_data_mask; |
| 931 | uint32_t tmp; |
| 932 | |
| 933 | timings = nand_get_sdr_timings(conf); |
| 934 | if (IS_ERR(timings)) |
| 935 | return PTR_ERR(timings); |
| 936 | |
| 937 | /* clk_x period in picoseconds */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 938 | t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); |
| 939 | if (!t_x) |
| 940 | return -EINVAL; |
| 941 | |
| 942 | /* |
| 943 | * The bus interface clock, clk_x, is phase aligned with the core clock. |
| 944 | * The clk_x is an integral multiple N of the core clk. The value N is |
| 945 | * configured at IP delivery time, and its available value is 4, 5, 6. |
| 946 | */ |
| 947 | mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate); |
| 948 | if (mult_x < 4 || mult_x > 6) |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 949 | return -EINVAL; |
| 950 | |
| 951 | if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) |
| 952 | return 0; |
| 953 | |
| 954 | /* tREA -> ACC_CLKS */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 955 | acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 956 | acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); |
| 957 | |
| 958 | tmp = ioread32(denali->reg + ACC_CLKS); |
| 959 | tmp &= ~ACC_CLKS__VALUE; |
| 960 | tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); |
| 961 | iowrite32(tmp, denali->reg + ACC_CLKS); |
| 962 | |
| 963 | /* tRWH -> RE_2_WE */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 964 | re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 965 | re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); |
| 966 | |
| 967 | tmp = ioread32(denali->reg + RE_2_WE); |
| 968 | tmp &= ~RE_2_WE__VALUE; |
| 969 | tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); |
| 970 | iowrite32(tmp, denali->reg + RE_2_WE); |
| 971 | |
| 972 | /* tRHZ -> RE_2_RE */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 973 | re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 974 | re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); |
| 975 | |
| 976 | tmp = ioread32(denali->reg + RE_2_RE); |
| 977 | tmp &= ~RE_2_RE__VALUE; |
| 978 | tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); |
| 979 | iowrite32(tmp, denali->reg + RE_2_RE); |
| 980 | |
| 981 | /* |
| 982 | * tCCS, tWHR -> WE_2_RE |
| 983 | * |
| 984 | * With WE_2_RE properly set, the Denali controller automatically takes |
| 985 | * care of the delay; the driver need not set NAND_WAIT_TCCS. |
| 986 | */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 987 | we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 988 | we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); |
| 989 | |
| 990 | tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); |
| 991 | tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; |
| 992 | tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); |
| 993 | iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); |
| 994 | |
| 995 | /* tADL -> ADDR_2_DATA */ |
| 996 | |
| 997 | /* for older versions, ADDR_2_DATA is only 6 bit wide */ |
| 998 | addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
| 999 | if (denali->revision < 0x0501) |
| 1000 | addr_2_data_mask >>= 1; |
| 1001 | |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1002 | addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1003 | addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); |
| 1004 | |
| 1005 | tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); |
| 1006 | tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
| 1007 | tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); |
| 1008 | iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); |
| 1009 | |
| 1010 | /* tREH, tWH -> RDWR_EN_HI_CNT */ |
| 1011 | rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1012 | t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1013 | rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); |
| 1014 | |
| 1015 | tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); |
| 1016 | tmp &= ~RDWR_EN_HI_CNT__VALUE; |
| 1017 | tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); |
| 1018 | iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); |
| 1019 | |
| 1020 | /* tRP, tWP -> RDWR_EN_LO_CNT */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1021 | rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1022 | rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1023 | t_x); |
| 1024 | rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1025 | rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); |
| 1026 | rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); |
| 1027 | |
| 1028 | tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); |
| 1029 | tmp &= ~RDWR_EN_LO_CNT__VALUE; |
| 1030 | tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); |
| 1031 | iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); |
| 1032 | |
| 1033 | /* tCS, tCEA -> CS_SETUP_CNT */ |
Masahiro Yamada | 8ccfbfb | 2018-12-19 20:03:18 +0900 | [diff] [blame] | 1034 | cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo, |
| 1035 | (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks, |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1036 | 0); |
| 1037 | cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); |
| 1038 | |
| 1039 | tmp = ioread32(denali->reg + CS_SETUP_CNT); |
| 1040 | tmp &= ~CS_SETUP_CNT__VALUE; |
| 1041 | tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); |
| 1042 | iowrite32(tmp, denali->reg + CS_SETUP_CNT); |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1043 | |
| 1044 | return 0; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1045 | } |
| 1046 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1047 | static void denali_reset_banks(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1048 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1049 | u32 irq_status; |
| 1050 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1051 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1052 | for (i = 0; i < denali->max_banks; i++) { |
| 1053 | denali->active_bank = i; |
| 1054 | |
| 1055 | denali_reset_irq(denali); |
| 1056 | |
| 1057 | iowrite32(DEVICE_RESET__BANK(i), |
| 1058 | denali->reg + DEVICE_RESET); |
| 1059 | |
| 1060 | irq_status = denali_wait_for_irq(denali, |
| 1061 | INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); |
| 1062 | if (!(irq_status & INTR__INT_ACT)) |
| 1063 | break; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1064 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1065 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1066 | dev_dbg(denali->dev, "%d chips connected\n", i); |
| 1067 | denali->max_banks = i; |
| 1068 | } |
| 1069 | |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1070 | static void denali_hw_init(struct denali_nand_info *denali) |
| 1071 | { |
| 1072 | /* |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 1073 | * The REVISION register may not be reliable. Platforms are allowed to |
| 1074 | * override it. |
| 1075 | */ |
| 1076 | if (!denali->revision) |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1077 | denali->revision = swab16(ioread32(denali->reg + REVISION)); |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 1078 | |
| 1079 | /* |
Masahiro Yamada | 80924cc | 2020-01-30 00:55:55 +0900 | [diff] [blame] | 1080 | * Set how many bytes should be skipped before writing data in OOB. |
| 1081 | * If a platform requests a non-zero value, set it to the register. |
| 1082 | * Otherwise, read the value out, expecting it has already been set up |
| 1083 | * by firmware. |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1084 | */ |
Masahiro Yamada | 80924cc | 2020-01-30 00:55:55 +0900 | [diff] [blame] | 1085 | if (denali->oob_skip_bytes) |
| 1086 | iowrite32(denali->oob_skip_bytes, |
| 1087 | denali->reg + SPARE_AREA_SKIP_BYTES); |
| 1088 | else |
| 1089 | denali->oob_skip_bytes = ioread32(denali->reg + |
| 1090 | SPARE_AREA_SKIP_BYTES); |
| 1091 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1092 | denali_detect_max_banks(denali); |
| 1093 | iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); |
| 1094 | iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1095 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1096 | iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); |
| 1097 | } |
| 1098 | |
/*
 * Number of ECC bytes the Denali BCH engine consumes per code word:
 * ceil(strength * fls(step_size * 8) / 16) * 2. The result is always
 * even, as the hardware requires ecc.bytes to be a multiple of 2.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	int bits_per_symbol = fls(step_size * 8);

	return DIV_ROUND_UP(strength * bits_per_symbol, 16) * 2;
}
EXPORT_SYMBOL(denali_calc_ecc_bytes);
| 1105 | |
/*
 * Choose the ECC step size and strength for this chip.
 *
 * Preference order:
 *  1. honor .size/.strength if already set (usually by DT), after
 *     validating them against the controller's capabilities;
 *  2. otherwise pick the closest match to the chip's own requirement;
 *  3. fall back to (or, with NAND_ECC_MAXIMIZE, go straight to) the
 *     maximum strength that fits in the available OOB space.
 */
static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
			    struct denali_nand_info *denali)
{
	/* OOB bytes usable for ECC: spare area minus the skipped bytes */
	int oobavail = mtd->oobsize - denali->oob_skip_bytes;
	int ret;

	/*
	 * If .size and .strength are already set (usually by DT),
	 * check if they are supported by this controller.
	 */
	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);

	/*
	 * We want .size and .strength closest to the chip's requirement
	 * unless NAND_ECC_MAXIMIZE is requested.
	 */
	if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
		ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
		if (!ret)
			return 0;
	}

	/* Max ECC strength is the last thing we can do */
	return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
}
| 1132 | |
/* legacy ecclayout; eccbytes is filled in by denali_init() at probe time */
static struct nand_ecclayout nand_oob;
| 1134 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1135 | static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, |
| 1136 | struct mtd_oob_region *oobregion) |
| 1137 | { |
| 1138 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 1139 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1140 | |
| 1141 | if (section) |
| 1142 | return -ERANGE; |
| 1143 | |
| 1144 | oobregion->offset = denali->oob_skip_bytes; |
| 1145 | oobregion->length = chip->ecc.total; |
| 1146 | |
| 1147 | return 0; |
| 1148 | } |
| 1149 | |
| 1150 | static int denali_ooblayout_free(struct mtd_info *mtd, int section, |
| 1151 | struct mtd_oob_region *oobregion) |
| 1152 | { |
| 1153 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 1154 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1155 | |
| 1156 | if (section) |
| 1157 | return -ERANGE; |
| 1158 | |
| 1159 | oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; |
| 1160 | oobregion->length = mtd->oobsize - oobregion->offset; |
| 1161 | |
| 1162 | return 0; |
| 1163 | } |
| 1164 | |
/* OOB layout callbacks handed to the MTD core via mtd_set_ooblayout() */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.rfree = denali_ooblayout_free,
};
| 1169 | |
/*
 * Adjust the logical geometry when two x8 devices are wired in parallel
 * on a x16-capable IP (DEVICES_CONNECTED == 2): the core framework then
 * sees pages, blocks and ECC parameters twice as large as a single
 * device's.
 *
 * Returns 0 on success, -EINVAL for unsupported device counts.
 */
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
	struct nand_chip *chip = &denali->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);

	/*
	 * Support for multi device:
	 * When the IP configuration is x16 capable and two x8 chips are
	 * connected in parallel, DEVICES_CONNECTED should be set to 2.
	 * In this case, the core framework knows nothing about this fact,
	 * so we should tell it the _logical_ pagesize and anything necessary.
	 */
	denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);

	/*
	 * On some SoCs, DEVICES_CONNECTED is not auto-detected.
	 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case.
	 */
	if (denali->devs_per_cs == 0) {
		denali->devs_per_cs = 1;
		iowrite32(1, denali->reg + DEVICES_CONNECTED);
	}

	/* single device: nothing to fix up */
	if (denali->devs_per_cs == 1)
		return 0;

	if (denali->devs_per_cs != 2) {
		dev_err(denali->dev, "unsupported number of devices %d\n",
			denali->devs_per_cs);
		return -EINVAL;
	}

	/* 2 chips in parallel: double every size/shift-related field */
	mtd->size <<= 1;
	mtd->erasesize <<= 1;
	mtd->writesize <<= 1;
	mtd->oobsize <<= 1;
	chip->chipsize <<= 1;
	chip->page_shift += 1;
	chip->phys_erase_shift += 1;
	chip->bbt_erase_shift += 1;
	chip->chip_shift += 1;
	chip->pagemask <<= 1;
	chip->ecc.size <<= 1;
	chip->ecc.bytes <<= 1;
	chip->ecc.strength <<= 1;
	denali->oob_skip_bytes <<= 1;

	return 0;
}
| 1220 | |
Masahiro Yamada | 1d9654d | 2017-08-26 01:12:31 +0900 | [diff] [blame] | 1221 | int denali_init(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1222 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1223 | struct nand_chip *chip = &denali->nand; |
| 1224 | struct mtd_info *mtd = nand_to_mtd(chip); |
| 1225 | u32 features = ioread32(denali->reg + FEATURES); |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1226 | int ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1227 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1228 | denali_hw_init(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1229 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1230 | denali_clear_irq_all(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1231 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1232 | denali_reset_banks(denali); |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1233 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1234 | denali->active_bank = DENALI_INVALID_BANK; |
| 1235 | |
| 1236 | chip->flash_node = dev_of_offset(denali->dev); |
| 1237 | /* Fallback to the default name if DT did not give "label" property */ |
| 1238 | if (!mtd->name) |
| 1239 | mtd->name = "denali-nand"; |
| 1240 | |
| 1241 | chip->select_chip = denali_select_chip; |
| 1242 | chip->read_byte = denali_read_byte; |
| 1243 | chip->write_byte = denali_write_byte; |
| 1244 | chip->read_word = denali_read_word; |
| 1245 | chip->cmd_ctrl = denali_cmd_ctrl; |
| 1246 | chip->dev_ready = denali_dev_ready; |
| 1247 | chip->waitfunc = denali_waitfunc; |
| 1248 | |
| 1249 | if (features & FEATURES__INDEX_ADDR) { |
| 1250 | denali->host_read = denali_indexed_read; |
| 1251 | denali->host_write = denali_indexed_write; |
| 1252 | } else { |
| 1253 | denali->host_read = denali_direct_read; |
| 1254 | denali->host_write = denali_direct_write; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1255 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1256 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1257 | /* clk rate info is needed for setup_data_interface */ |
| 1258 | if (denali->clk_x_rate) |
| 1259 | chip->setup_data_interface = denali_setup_data_interface; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1260 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1261 | ret = nand_scan_ident(mtd, denali->max_banks, NULL); |
| 1262 | if (ret) |
| 1263 | return ret; |
| 1264 | |
| 1265 | if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) |
| 1266 | denali->dma_avail = 1; |
| 1267 | |
| 1268 | if (denali->dma_avail) { |
Masahiro Yamada | 4a610fa | 2018-07-19 10:13:23 +0900 | [diff] [blame] | 1269 | chip->buf_align = ARCH_DMA_MINALIGN; |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1270 | if (denali->caps & DENALI_CAP_DMA_64BIT) |
| 1271 | denali->setup_dma = denali_setup_dma64; |
| 1272 | else |
| 1273 | denali->setup_dma = denali_setup_dma32; |
| 1274 | } else { |
| 1275 | chip->buf_align = 4; |
| 1276 | } |
| 1277 | |
| 1278 | chip->options |= NAND_USE_BOUNCE_BUFFER; |
| 1279 | chip->bbt_options |= NAND_BBT_USE_FLASH; |
| 1280 | chip->bbt_options |= NAND_BBT_NO_OOB; |
| 1281 | denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1282 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1283 | /* no subpage writes on denali */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1284 | chip->options |= NAND_NO_SUBPAGE_WRITE; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1285 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1286 | ret = denali_ecc_setup(mtd, chip, denali); |
| 1287 | if (ret) { |
| 1288 | dev_err(denali->dev, "Failed to setup ECC settings.\n"); |
| 1289 | return ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1290 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1291 | |
| 1292 | dev_dbg(denali->dev, |
| 1293 | "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", |
| 1294 | chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); |
| 1295 | |
| 1296 | iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | |
| 1297 | FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), |
| 1298 | denali->reg + ECC_CORRECTION); |
| 1299 | iowrite32(mtd->erasesize / mtd->writesize, |
| 1300 | denali->reg + PAGES_PER_BLOCK); |
| 1301 | iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, |
| 1302 | denali->reg + DEVICE_WIDTH); |
| 1303 | iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, |
| 1304 | denali->reg + TWO_ROW_ADDR_CYCLES); |
| 1305 | iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); |
| 1306 | iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); |
| 1307 | |
| 1308 | iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); |
| 1309 | iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); |
| 1310 | /* chip->ecc.steps is set by nand_scan_tail(); not available here */ |
| 1311 | iowrite32(mtd->writesize / chip->ecc.size, |
| 1312 | denali->reg + CFG_NUM_DATA_BLOCKS); |
| 1313 | |
| 1314 | mtd_set_ooblayout(mtd, &denali_ooblayout_ops); |
| 1315 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1316 | nand_oob.eccbytes = denali->nand.ecc.bytes; |
| 1317 | denali->nand.ecc.layout = &nand_oob; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1318 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1319 | if (chip->options & NAND_BUSWIDTH_16) { |
| 1320 | chip->read_buf = denali_read_buf16; |
| 1321 | chip->write_buf = denali_write_buf16; |
| 1322 | } else { |
| 1323 | chip->read_buf = denali_read_buf; |
| 1324 | chip->write_buf = denali_write_buf; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1325 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1326 | chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; |
| 1327 | chip->ecc.read_page = denali_read_page; |
| 1328 | chip->ecc.read_page_raw = denali_read_page_raw; |
| 1329 | chip->ecc.write_page = denali_write_page; |
| 1330 | chip->ecc.write_page_raw = denali_write_page_raw; |
| 1331 | chip->ecc.read_oob = denali_read_oob; |
| 1332 | chip->ecc.write_oob = denali_write_oob; |
| 1333 | chip->erase = denali_erase; |
| 1334 | |
| 1335 | ret = denali_multidev_fixup(denali); |
| 1336 | if (ret) |
| 1337 | return ret; |
| 1338 | |
| 1339 | /* |
| 1340 | * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not |
| 1341 | * use devm_kmalloc() because the memory allocated by devm_ does not |
| 1342 | * guarantee DMA-safe alignment. |
| 1343 | */ |
| 1344 | denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); |
| 1345 | if (!denali->buf) |
| 1346 | return -ENOMEM; |
| 1347 | |
| 1348 | ret = nand_scan_tail(mtd); |
| 1349 | if (ret) |
| 1350 | goto free_buf; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1351 | |
Scott Wood | ceee07b | 2016-05-30 13:57:58 -0500 | [diff] [blame] | 1352 | ret = nand_register(0, mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1353 | if (ret) { |
| 1354 | dev_err(denali->dev, "Failed to register MTD: %d\n", ret); |
| 1355 | goto free_buf; |
| 1356 | } |
| 1357 | return 0; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1358 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1359 | free_buf: |
| 1360 | kfree(denali->buf); |
| 1361 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1362 | return ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1363 | } |