Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2014 Panasonic Corporation |
| 3 | * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> |
| 4 | * Copyright (C) 2009-2010, Intel Corporation and its suppliers. |
| 5 | * |
| 6 | * SPDX-License-Identifier: GPL-2.0+ |
| 7 | */ |
| 8 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 9 | #include <dm.h> |
Masahiro Yamada | 0faef2e | 2017-11-30 13:45:27 +0900 | [diff] [blame] | 10 | #include <nand.h> |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 11 | #include <linux/bitfield.h> |
| 12 | #include <linux/dma-direction.h> |
Masahiro Yamada | 1221ce4 | 2016-09-21 11:28:55 +0900 | [diff] [blame] | 13 | #include <linux/errno.h> |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 14 | #include <linux/io.h> |
Masahiro Yamada | 0faef2e | 2017-11-30 13:45:27 +0900 | [diff] [blame] | 15 | #include <linux/mtd/mtd.h> |
| 16 | #include <linux/mtd/rawnand.h> |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 17 | |
| 18 | #include "denali.h" |
| 19 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 20 | static dma_addr_t dma_map_single(void *dev, void *ptr, size_t size, |
| 21 | enum dma_data_direction dir) |
| 22 | { |
| 23 | unsigned long addr = (unsigned long)ptr; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 24 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 25 | if (dir == DMA_FROM_DEVICE) |
| 26 | invalidate_dcache_range(addr, addr + size); |
| 27 | else |
| 28 | flush_dcache_range(addr, addr + size); |
| 29 | |
| 30 | return addr; |
| 31 | } |
| 32 | |
| 33 | static void dma_unmap_single(void *dev, dma_addr_t addr, size_t size, |
| 34 | enum dma_data_direction dir) |
| 35 | { |
| 36 | if (dir != DMA_TO_DEVICE) |
| 37 | invalidate_dcache_range(addr, addr + size); |
| 38 | } |
| 39 | |
/* Identity mapping cannot fail in this environment; always report success. */
static int dma_mapping_error(void *dev, dma_addr_t addr)
{
	return 0;
}
| 44 | |
| 45 | #define DENALI_NAND_NAME "denali-nand" |
| 46 | |
| 47 | /* for Indexed Addressing */ |
| 48 | #define DENALI_INDEXED_CTRL 0x00 |
| 49 | #define DENALI_INDEXED_DATA 0x10 |
| 50 | |
| 51 | #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ |
| 52 | #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ |
| 53 | #define DENALI_MAP10 (2 << 26) /* high-level control plane */ |
| 54 | #define DENALI_MAP11 (3 << 26) /* direct controller access */ |
| 55 | |
| 56 | /* MAP11 access cycle type */ |
| 57 | #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ |
| 58 | #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ |
| 59 | #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ |
| 60 | |
| 61 | /* MAP10 commands */ |
| 62 | #define DENALI_ERASE 0x01 |
| 63 | |
| 64 | #define DENALI_BANK(denali) ((denali)->active_bank << 24) |
| 65 | |
| 66 | #define DENALI_INVALID_BANK -1 |
| 67 | #define DENALI_NR_BANKS 4 |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 68 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 69 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 70 | * The bus interface clock, clk_x, is phase aligned with the core clock. The |
| 71 | * clk_x is an integral multiple N of the core clk. The value N is configured |
| 72 | * at IP delivery time, and its available value is 4, 5, or 6. We need to align |
| 73 | * to the largest value to make it work with any possible configuration. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 74 | */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 75 | #define DENALI_CLK_X_MULT 6 |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 76 | |
/* Recover the denali_nand_info that embeds the nand_chip behind @mtd */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 81 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 82 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 83 | * Direct Addressing - the slave address forms the control information (command |
| 84 | * type, bank, block, and page address). The slave data is the actual data to |
| 85 | * be transferred. This mode requires 28 bits of address region allocated. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 86 | */ |
/* MAP read in Direct Addressing mode: the offset itself carries the control info */
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
	return ioread32(denali->host + addr);
}
| 91 | |
/* MAP write in Direct Addressing mode: the offset itself carries the control info */
static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
				u32 data)
{
	iowrite32(data, denali->host + addr);
}
| 97 | |
| 98 | /* |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 99 | * Indexed Addressing - address translation module intervenes in passing the |
| 100 | * control information. This mode reduces the required address range. The |
| 101 | * control information and transferred data are latched by the registers in |
| 102 | * the translation module. |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 103 | */ |
/*
 * MAP read in Indexed Addressing mode: latch the control word into the
 * translation module first, then fetch the data.  The two accesses must
 * stay in this order.
 */
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	return ioread32(denali->host + DENALI_INDEXED_DATA);
}
| 109 | |
/*
 * MAP write in Indexed Addressing mode: latch the control word, then the
 * data.  The two accesses must stay in this order.
 */
static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
				 u32 data)
{
	iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
	iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
| 116 | |
| 117 | /* |
| 118 | * Use the configuration feature register to determine the maximum number of |
| 119 | * banks that the hardware supports. |
| 120 | */ |
static void denali_detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = ioread32(denali->reg + FEATURES);

	/* the N_BANKS field holds the bank count as a power-of-two exponent */
	denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);

	/* the encoding changed from rev 5.0 to 5.1 */
	if (denali->revision < 0x0501)
		denali->max_banks <<= 1;
}
| 131 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 132 | static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 133 | { |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 134 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 135 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 136 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 137 | iowrite32(U32_MAX, denali->reg + INTR_EN(i)); |
| 138 | iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 139 | } |
| 140 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 141 | static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 142 | { |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 143 | int i; |
| 144 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 145 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 146 | iowrite32(0, denali->reg + INTR_EN(i)); |
| 147 | iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 148 | } |
| 149 | |
/* Acknowledge the given interrupt bits for @bank */
static void denali_clear_irq(struct denali_nand_info *denali,
			     int bank, uint32_t irq_status)
{
	/* write one to clear bits */
	iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
}
| 156 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 157 | static void denali_clear_irq_all(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 158 | { |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 159 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 160 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 161 | for (i = 0; i < DENALI_NR_BANKS; i++) |
| 162 | denali_clear_irq(denali, i, U32_MAX); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 163 | } |
| 164 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 165 | static void __denali_check_irq(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 166 | { |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 167 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 168 | int i; |
| 169 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 170 | for (i = 0; i < DENALI_NR_BANKS; i++) { |
| 171 | irq_status = ioread32(denali->reg + INTR_STATUS(i)); |
| 172 | denali_clear_irq(denali, i, irq_status); |
| 173 | |
| 174 | if (i != denali->active_bank) |
| 175 | continue; |
| 176 | |
| 177 | denali->irq_status |= irq_status; |
| 178 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 179 | } |
| 180 | |
/* Forget any previously latched interrupt state before starting an operation */
static void denali_reset_irq(struct denali_nand_info *denali)
{
	denali->irq_status = 0;
	denali->irq_mask = 0;
}
| 186 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 187 | static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, |
| 188 | uint32_t irq_mask) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 189 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 190 | unsigned long time_left = 1000000; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 191 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 192 | while (time_left) { |
| 193 | __denali_check_irq(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 194 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 195 | if (irq_mask & denali->irq_status) |
| 196 | return denali->irq_status; |
| 197 | udelay(1); |
| 198 | time_left--; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 199 | } |
| 200 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 201 | if (!time_left) { |
| 202 | dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", |
| 203 | irq_mask); |
| 204 | return 0; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 205 | } |
| 206 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 207 | return denali->irq_status; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 208 | } |
| 209 | |
/* Non-blocking variant: poll once and return the accumulated irq status */
static uint32_t denali_check_irq(struct denali_nand_info *denali)
{
	__denali_check_irq(denali);

	return denali->irq_status;
}
| 216 | |
| 217 | static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
| 218 | { |
| 219 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 220 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 221 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 222 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 223 | for (i = 0; i < len; i++) |
| 224 | buf[i] = denali->host_read(denali, addr); |
| 225 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 226 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 227 | static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
| 228 | { |
| 229 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 230 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 231 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 232 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 233 | for (i = 0; i < len; i++) |
| 234 | denali->host_write(denali, addr, buf[i]); |
| 235 | } |
| 236 | |
| 237 | static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) |
| 238 | { |
| 239 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 240 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 241 | uint16_t *buf16 = (uint16_t *)buf; |
| 242 | int i; |
| 243 | |
| 244 | for (i = 0; i < len / 2; i++) |
| 245 | buf16[i] = denali->host_read(denali, addr); |
| 246 | } |
| 247 | |
| 248 | static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, |
| 249 | int len) |
| 250 | { |
| 251 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 252 | u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); |
| 253 | const uint16_t *buf16 = (const uint16_t *)buf; |
| 254 | int i; |
| 255 | |
| 256 | for (i = 0; i < len / 2; i++) |
| 257 | denali->host_write(denali, addr, buf16[i]); |
| 258 | } |
| 259 | |
| 260 | static uint8_t denali_read_byte(struct mtd_info *mtd) |
| 261 | { |
| 262 | uint8_t byte; |
| 263 | |
| 264 | denali_read_buf(mtd, &byte, 1); |
| 265 | |
| 266 | return byte; |
| 267 | } |
| 268 | |
/* Single-byte PIO write, built on denali_write_buf() */
static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	denali_write_buf(mtd, &byte, 1);
}
| 273 | |
| 274 | static uint16_t denali_read_word(struct mtd_info *mtd) |
| 275 | { |
| 276 | uint16_t word; |
| 277 | |
| 278 | denali_read_buf16(mtd, (uint8_t *)&word, 2); |
| 279 | |
| 280 | return word; |
| 281 | } |
| 282 | |
| 283 | static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) |
| 284 | { |
| 285 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 286 | uint32_t type; |
| 287 | |
| 288 | if (ctrl & NAND_CLE) |
| 289 | type = DENALI_MAP11_CMD; |
| 290 | else if (ctrl & NAND_ALE) |
| 291 | type = DENALI_MAP11_ADDR; |
| 292 | else |
| 293 | return; |
| 294 | |
| 295 | /* |
| 296 | * Some commands are followed by chip->dev_ready or chip->waitfunc. |
| 297 | * irq_status must be cleared here to catch the R/B# interrupt later. |
| 298 | */ |
| 299 | if (ctrl & NAND_CTRL_CHANGE) |
| 300 | denali_reset_irq(denali); |
| 301 | |
| 302 | denali->host_write(denali, DENALI_BANK(denali) | type, dat); |
| 303 | } |
| 304 | |
| 305 | static int denali_dev_ready(struct mtd_info *mtd) |
| 306 | { |
| 307 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 308 | |
| 309 | return !!(denali_check_irq(denali) & INTR__INT_ACT); |
| 310 | } |
| 311 | |
| 312 | static int denali_check_erased_page(struct mtd_info *mtd, |
| 313 | struct nand_chip *chip, uint8_t *buf, |
| 314 | unsigned long uncor_ecc_flags, |
| 315 | unsigned int max_bitflips) |
| 316 | { |
| 317 | uint8_t *ecc_code = chip->buffers->ecccode; |
| 318 | int ecc_steps = chip->ecc.steps; |
| 319 | int ecc_size = chip->ecc.size; |
| 320 | int ecc_bytes = chip->ecc.bytes; |
| 321 | int i, ret, stat; |
| 322 | |
| 323 | ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, |
| 324 | chip->ecc.total); |
| 325 | if (ret) |
| 326 | return ret; |
| 327 | |
| 328 | for (i = 0; i < ecc_steps; i++) { |
| 329 | if (!(uncor_ecc_flags & BIT(i))) |
| 330 | continue; |
| 331 | |
| 332 | stat = nand_check_erased_ecc_chunk(buf, ecc_size, |
| 333 | ecc_code, ecc_bytes, |
| 334 | NULL, 0, |
| 335 | chip->ecc.strength); |
| 336 | if (stat < 0) { |
| 337 | mtd->ecc_stats.failed++; |
| 338 | } else { |
| 339 | mtd->ecc_stats.corrected += stat; |
| 340 | max_bitflips = max_t(unsigned int, max_bitflips, stat); |
| 341 | } |
| 342 | |
| 343 | buf += ecc_size; |
| 344 | ecc_code += ecc_bytes; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 345 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 346 | |
| 347 | return max_bitflips; |
| 348 | } |
| 349 | |
/*
 * Evaluate the result of hardware-corrected ECC (HW_ECC_FIXUP capability):
 * read the per-bank ECC_COR_INFO field for the active bank.  On an
 * uncorrectable error, set all bits in *uncor_ecc_flags so the caller
 * performs the erased-page check for every sector; otherwise return the
 * maximum per-sector corrected bitflip count.
 */
static int denali_hw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int bank = denali->active_bank;
	uint32_t ecc_cor;
	unsigned int max_bitflips;

	ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
	ecc_cor >>= ECC_COR_INFO__SHIFT(bank);

	if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
		/*
		 * This flag is set when uncorrectable error occurs at least in
		 * one ECC sector. We can not know "how many sectors", or
		 * "which sector(s)". We need erase-page check for all sectors.
		 */
		*uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
		return 0;
	}

	max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);

	/*
	 * The register holds the maximum of per-sector corrected bitflips.
	 * This is suitable for the return value of the ->read_page() callback.
	 * Unfortunately, we can not know the total number of corrected bits in
	 * the page. Increase the stats by max_bitflips. (compromised solution)
	 */
	mtd->ecc_stats.corrected += max_bitflips;

	return max_bitflips;
}
| 384 | |
/*
 * Software-assisted ECC fixup: drain the controller's error FIFO
 * (ECC_ERROR_ADDRESS / ERR_CORRECTION_INFO pairs) and apply each
 * single-byte correction to @buf.  Sectors reported as uncorrectable are
 * recorded in *uncor_ecc_flags for a later erased-page check by the
 * caller.  The FIFO must be read until the LAST_ERR entry; the register
 * reads in each iteration are order-dependent.
 *
 * Returns the maximum per-sector bitflip count, or -EIO if the
 * ECC_TRANSACTION_DONE interrupt never arrives.
 */
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
			       struct denali_nand_info *denali,
			       unsigned long *uncor_ecc_flags, uint8_t *buf)
{
	unsigned int ecc_size = denali->nand.ecc.size;
	unsigned int bitflips = 0;
	unsigned int max_bitflips = 0;
	uint32_t err_addr, err_cor_info;
	unsigned int err_byte, err_sector, err_device;
	uint8_t err_cor_value;
	unsigned int prev_sector = 0;
	uint32_t irq_status;

	denali_reset_irq(denali);

	do {
		/* location of the error: sector number and byte offset */
		err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
		err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
		err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);

		/* correction value and which device of the chip-select it hit */
		err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
		err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
					  err_cor_info);
		err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
				       err_cor_info);

		/* reset the bitflip counter when crossing ECC sector */
		if (err_sector != prev_sector)
			bitflips = 0;

		if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
			/*
			 * Check later if this is a real ECC error, or
			 * an erased sector.
			 */
			*uncor_ecc_flags |= BIT(err_sector);
		} else if (err_byte < ecc_size) {
			/*
			 * If err_byte is larger than ecc_size, means error
			 * happened in OOB, so we ignore it. It's no need for
			 * us to correct it err_device is represented the NAND
			 * error bits are happened in if there are more than
			 * one NAND connected.
			 */
			int offset;
			unsigned int flips_in_byte;

			offset = (err_sector * ecc_size + err_byte) *
					denali->devs_per_cs + err_device;

			/* correct the ECC error */
			flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
			buf[offset] ^= err_cor_value;
			mtd->ecc_stats.corrected += flips_in_byte;
			bitflips += flips_in_byte;

			max_bitflips = max(max_bitflips, bitflips);
		}

		prev_sector = err_sector;
	} while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));

	/*
	 * Once handle all ECC errors, controller will trigger an
	 * ECC_TRANSACTION_DONE interrupt.
	 */
	irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
	if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
		return -EIO;

	return max_bitflips;
}
| 457 | |
/*
 * Program a DMA transfer on controllers with the 64-bit DMA interface.
 * The three MAP10 writes below form one hardware command sequence and
 * must be issued in this exact order.
 */
static void denali_setup_dma64(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali) | page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	denali->host_write(denali, mode,
			   0x01002000 | (64 << 16) | (write << 8) | page_count);

	/* 2. set memory low address */
	denali->host_write(denali, mode, lower_32_bits(dma_addr));

	/* 3. set memory high address */
	denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
| 481 | |
/*
 * Program a DMA transfer on controllers with the 32-bit DMA interface.
 * The four MAP10 writes below form one hardware command sequence and
 * must be issued in this exact order.
 */
static void denali_setup_dma32(struct denali_nand_info *denali,
			       dma_addr_t dma_addr, int page, int write)
{
	uint32_t mode;
	const int page_count = 1;

	mode = DENALI_MAP10 | DENALI_BANK(denali);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	denali->host_write(denali, mode | page,
			   0x2000 | (write << 8) | page_count);

	/* 2. set memory high address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	denali->host_write(denali, mode | 0x14000, 0x2400);
}
| 505 | |
/*
 * Read one page (@size bytes) in PIO mode via MAP01 accesses.
 *
 * Returns 0 on success, -EIO if the transfer never completes, or
 * -EBADMSG on an ECC error (uncorrectable-only when the hardware does
 * the fixup itself).  An erased page is normalized to all-0xff.
 */
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw)
{
	u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
	uint32_t *buf32 = (uint32_t *)buf;
	uint32_t irq_status, ecc_err_mask;
	int i;

	/* with HW fixup, only uncorrectable errors are fatal to the read */
	if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	else
		ecc_err_mask = INTR__ECC_ERR;

	denali_reset_irq(denali);

	/* drain the page 32 bits at a time from the same MAP01 address */
	for (i = 0; i < size / 4; i++)
		*buf32++ = denali->host_read(denali, addr);

	irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
	if (!(irq_status & INTR__PAGE_XFER_INC))
		return -EIO;

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return irq_status & ecc_err_mask ? -EBADMSG : 0;
}
| 533 | |
| 534 | static int denali_pio_write(struct denali_nand_info *denali, |
| 535 | const void *buf, size_t size, int page, int raw) |
| 536 | { |
| 537 | u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; |
| 538 | const uint32_t *buf32 = (uint32_t *)buf; |
| 539 | uint32_t irq_status; |
| 540 | int i; |
| 541 | |
| 542 | denali_reset_irq(denali); |
| 543 | |
| 544 | for (i = 0; i < size / 4; i++) |
| 545 | denali->host_write(denali, addr, *buf32++); |
| 546 | |
| 547 | irq_status = denali_wait_for_irq(denali, |
| 548 | INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); |
| 549 | if (!(irq_status & INTR__PROGRAM_COMP)) |
| 550 | return -EIO; |
| 551 | |
| 552 | return 0; |
| 553 | } |
| 554 | |
| 555 | static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, |
| 556 | size_t size, int page, int raw, int write) |
| 557 | { |
| 558 | if (write) |
| 559 | return denali_pio_write(denali, buf, size, page, raw); |
| 560 | else |
| 561 | return denali_pio_read(denali, buf, size, page, raw); |
| 562 | } |
| 563 | |
/*
 * Transfer one page via the controller's DMA engine, falling back to PIO
 * if the buffer cannot be DMA-mapped.
 *
 * Returns 0 on success, -EIO if the DMA command never completes, or
 * -EBADMSG on a read ECC error.  An erased page is normalized to
 * all-0xff.
 */
static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
			   size_t size, int page, int raw, int write)
{
	dma_addr_t dma_addr;
	uint32_t irq_mask, irq_status, ecc_err_mask;
	enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = 0;

	dma_addr = dma_map_single(denali->dev, buf, size, dir);
	if (dma_mapping_error(denali->dev, dma_addr)) {
		dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
		return denali_pio_xfer(denali, buf, size, page, raw, write);
	}

	if (write) {
		/*
		 * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
		 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
		 * when the page program is completed.
		 */
		irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
		ecc_err_mask = 0;
	} else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_UNCOR_ERR;
	} else {
		irq_mask = INTR__DMA_CMD_COMP;
		ecc_err_mask = INTR__ECC_ERR;
	}

	iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);

	/* clear stale status before kicking off the descriptor */
	denali_reset_irq(denali);
	denali->setup_dma(denali, dma_addr, page, write);

	irq_status = denali_wait_for_irq(denali, irq_mask);
	if (!(irq_status & INTR__DMA_CMD_COMP))
		ret = -EIO;
	else if (irq_status & ecc_err_mask)
		ret = -EBADMSG;

	iowrite32(0, denali->reg + DMA_ENABLE);

	dma_unmap_single(denali->dev, dma_addr, size, dir);

	if (irq_status & INTR__ERASED_PAGE)
		memset(buf, 0xff, size);

	return ret;
}
| 614 | |
| 615 | static int denali_data_xfer(struct denali_nand_info *denali, void *buf, |
| 616 | size_t size, int page, int raw, int write) |
| 617 | { |
| 618 | iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); |
| 619 | iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, |
| 620 | denali->reg + TRANSFER_SPARE_REG); |
| 621 | |
| 622 | if (denali->dma_avail) |
| 623 | return denali_dma_xfer(denali, buf, size, page, raw, write); |
| 624 | else |
| 625 | return denali_pio_xfer(denali, buf, size, page, raw, write); |
| 626 | } |
| 627 | |
/*
 * Read or write the whole OOB area of @page using random-in/out
 * commands, walking the controller's syndrome page layout
 * (payload/ECC interleaved, BBM bytes kept at the start of the OOB).
 * The pos/len adjustments mirror the layout handling in
 * denali_read_page_raw().
 */
static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
			    int page, int write)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
	unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	uint8_t *bufpoi = chip->oob_poi;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/* BBM at the beginning of the OOB area */
	chip->cmdfunc(mtd, start_cmd, writesize, page);
	if (write)
		chip->write_buf(mtd, bufpoi, oob_skip);
	else
		chip->read_buf(mtd, bufpoi, oob_skip);
	bufpoi += oob_skip;

	/* OOB ECC */
	for (i = 0; i < ecc_steps; i++) {
		pos = ecc_size + i * (ecc_size + ecc_bytes);
		len = ecc_bytes;

		/* skip the reserved BBM bytes when the chunk crosses into OOB */
		if (pos >= writesize)
			pos += oob_skip;
		else if (pos + len > writesize)
			len = writesize - pos;

		chip->cmdfunc(mtd, rnd_cmd, pos, -1);
		if (write)
			chip->write_buf(mtd, bufpoi, len);
		else
			chip->read_buf(mtd, bufpoi, len);
		bufpoi += len;
		/* remainder of a chunk split across the page/OOB boundary */
		if (len < ecc_bytes) {
			len = ecc_bytes - len;
			chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
			if (write)
				chip->write_buf(mtd, bufpoi, len);
			else
				chip->read_buf(mtd, bufpoi, len);
			bufpoi += len;
		}
	}

	/* OOB free */
	len = oobsize - (bufpoi - chip->oob_poi);
	chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
	if (write)
		chip->write_buf(mtd, bufpoi, len);
	else
		chip->read_buf(mtd, bufpoi, len);
}
| 687 | |
/*
 * nand_chip ->read_page_raw hook: transfer the whole page (data + spare)
 * with ECC disabled, then untangle the controller's syndrome layout in
 * denali->buf into the separate data (@buf) and OOB (chip->oob_poi)
 * buffers expected by the MTD layer.
 *
 * Returns 0 on success or a negative errno from the transfer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int ret, i, pos, len;

	/* raw transfer of the full page into the bounce buffer */
	ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
	if (ret)
		return ret;

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/* account for the BBM bytes at the page/OOB boundary */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			/* remainder of a payload chunk split across the boundary */
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(oob, tmp_buf + writesize, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			/* remainder of an ECC chunk split across the boundary */
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(oob, tmp_buf + size - len, len);
	}

	return 0;
}
| 762 | |
/*
 * Read only the OOB area of @page into chip->oob_poi.
 * Always succeeds from the core's point of view (returns 0).
 */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	/* last argument 0 selects the read direction */
	denali_oob_xfer(mtd, chip, page, 0);

	return 0;
}
| 770 | |
| 771 | static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, |
| 772 | int page) |
| 773 | { |
| 774 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 775 | int status; |
| 776 | |
| 777 | denali_reset_irq(denali); |
| 778 | |
| 779 | denali_oob_xfer(mtd, chip, page, 1); |
| 780 | |
| 781 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); |
| 782 | status = chip->waitfunc(mtd, chip); |
| 783 | |
| 784 | return status & NAND_STATUS_FAIL ? -EIO : 0; |
| 785 | } |
| 786 | |
| 787 | static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 788 | uint8_t *buf, int oob_required, int page) |
| 789 | { |
| 790 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 791 | unsigned long uncor_ecc_flags = 0; |
| 792 | int stat = 0; |
| 793 | int ret; |
| 794 | |
| 795 | ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); |
| 796 | if (ret && ret != -EBADMSG) |
| 797 | return ret; |
| 798 | |
| 799 | if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) |
| 800 | stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); |
| 801 | else if (ret == -EBADMSG) |
| 802 | stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); |
| 803 | |
| 804 | if (stat < 0) |
| 805 | return stat; |
| 806 | |
| 807 | if (uncor_ecc_flags) { |
| 808 | ret = denali_read_oob(mtd, chip, page); |
| 809 | if (ret) |
| 810 | return ret; |
| 811 | |
| 812 | stat = denali_check_erased_page(mtd, chip, buf, |
| 813 | uncor_ecc_flags, stat); |
| 814 | } |
| 815 | |
| 816 | return stat; |
| 817 | } |
| 818 | |
/*
 * Raw (ECC-bypassed) page write.
 *
 * Mirror of denali_read_page_raw(): assemble the bounce buffer in the
 * controller's syndrome layout (interleaved payload/ECC, BBM bytes
 * preserved at the start of the spare area) from the linear data (buf)
 * and OOB (chip->oob_poi) views, then push the whole page + OOB out in
 * one transfer with ECC disabled.
 *
 * Returns 0 on success, or a negative error code from the transfer.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = denali->buf;
	int oob_skip = denali->oob_skip_bytes;
	size_t size = writesize + oobsize;
	int i, pos, len;

	/*
	 * Fill the buffer with 0xff first except the full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	/* Arrange the buffer for syndrome payload/ecc layout */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			/*
			 * Steps at/after the main area boundary shift past
			 * the BBM bytes; a step straddling the boundary is
			 * written in two pieces.
			 */
			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				/* remainder continues just after the BBM */
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const uint8_t *oob = chip->oob_poi;

		/* BBM at the beginning of the OOB area */
		memcpy(tmp_buf + writesize, oob, oob_skip);
		oob += oob_skip;

		/* OOB ECC */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				/* ECC chunk split across the boundary */
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}

		/* OOB free */
		len = oobsize - (oob - chip->oob_poi);
		memcpy(tmp_buf + size - len, oob, len);
	}

	/* Full page + spare transfer, write direction, ECC disabled (raw). */
	return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
| 896 | |
| 897 | static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
| 898 | const uint8_t *buf, int oob_required, int page) |
| 899 | { |
| 900 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 901 | |
| 902 | return denali_data_xfer(denali, (void *)buf, mtd->writesize, |
| 903 | page, 0, 1); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 904 | } |
| 905 | |
| 906 | static void denali_select_chip(struct mtd_info *mtd, int chip) |
| 907 | { |
| 908 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 909 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 910 | denali->active_bank = chip; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 911 | } |
| 912 | |
| 913 | static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) |
| 914 | { |
| 915 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 916 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 917 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 918 | /* R/B# pin transitioned from low to high? */ |
| 919 | irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 920 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 921 | return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 922 | } |
| 923 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 924 | static int denali_erase(struct mtd_info *mtd, int page) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 925 | { |
| 926 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 927 | uint32_t irq_status; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 928 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 929 | denali_reset_irq(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 930 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 931 | denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, |
| 932 | DENALI_ERASE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 933 | |
| 934 | /* wait for erase to complete or failure to occur */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 935 | irq_status = denali_wait_for_irq(denali, |
| 936 | INTR__ERASE_COMP | INTR__ERASE_FAIL); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 937 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 938 | return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; |
| 939 | } |
| 940 | |
Masahiro Yamada | 1a7e176 | 2017-11-29 19:18:18 +0900 | [diff] [blame] | 941 | static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 942 | const struct nand_data_interface *conf) |
| 943 | { |
| 944 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 945 | const struct nand_sdr_timings *timings; |
| 946 | unsigned long t_clk; |
| 947 | int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; |
| 948 | int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; |
| 949 | int addr_2_data_mask; |
| 950 | uint32_t tmp; |
| 951 | |
| 952 | timings = nand_get_sdr_timings(conf); |
| 953 | if (IS_ERR(timings)) |
| 954 | return PTR_ERR(timings); |
| 955 | |
| 956 | /* clk_x period in picoseconds */ |
| 957 | t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); |
| 958 | if (!t_clk) |
| 959 | return -EINVAL; |
| 960 | |
| 961 | if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) |
| 962 | return 0; |
| 963 | |
| 964 | /* tREA -> ACC_CLKS */ |
| 965 | acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk); |
| 966 | acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); |
| 967 | |
| 968 | tmp = ioread32(denali->reg + ACC_CLKS); |
| 969 | tmp &= ~ACC_CLKS__VALUE; |
| 970 | tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); |
| 971 | iowrite32(tmp, denali->reg + ACC_CLKS); |
| 972 | |
| 973 | /* tRWH -> RE_2_WE */ |
| 974 | re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk); |
| 975 | re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); |
| 976 | |
| 977 | tmp = ioread32(denali->reg + RE_2_WE); |
| 978 | tmp &= ~RE_2_WE__VALUE; |
| 979 | tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); |
| 980 | iowrite32(tmp, denali->reg + RE_2_WE); |
| 981 | |
| 982 | /* tRHZ -> RE_2_RE */ |
| 983 | re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk); |
| 984 | re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); |
| 985 | |
| 986 | tmp = ioread32(denali->reg + RE_2_RE); |
| 987 | tmp &= ~RE_2_RE__VALUE; |
| 988 | tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); |
| 989 | iowrite32(tmp, denali->reg + RE_2_RE); |
| 990 | |
| 991 | /* |
| 992 | * tCCS, tWHR -> WE_2_RE |
| 993 | * |
| 994 | * With WE_2_RE properly set, the Denali controller automatically takes |
| 995 | * care of the delay; the driver need not set NAND_WAIT_TCCS. |
| 996 | */ |
| 997 | we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), |
| 998 | t_clk); |
| 999 | we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); |
| 1000 | |
| 1001 | tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); |
| 1002 | tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; |
| 1003 | tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); |
| 1004 | iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); |
| 1005 | |
| 1006 | /* tADL -> ADDR_2_DATA */ |
| 1007 | |
| 1008 | /* for older versions, ADDR_2_DATA is only 6 bit wide */ |
| 1009 | addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
| 1010 | if (denali->revision < 0x0501) |
| 1011 | addr_2_data_mask >>= 1; |
| 1012 | |
| 1013 | addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk); |
| 1014 | addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); |
| 1015 | |
| 1016 | tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); |
| 1017 | tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; |
| 1018 | tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); |
| 1019 | iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); |
| 1020 | |
| 1021 | /* tREH, tWH -> RDWR_EN_HI_CNT */ |
| 1022 | rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), |
| 1023 | t_clk); |
| 1024 | rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); |
| 1025 | |
| 1026 | tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); |
| 1027 | tmp &= ~RDWR_EN_HI_CNT__VALUE; |
| 1028 | tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); |
| 1029 | iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); |
| 1030 | |
| 1031 | /* tRP, tWP -> RDWR_EN_LO_CNT */ |
| 1032 | rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), |
| 1033 | t_clk); |
| 1034 | rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), |
| 1035 | t_clk); |
| 1036 | rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT); |
| 1037 | rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); |
| 1038 | rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); |
| 1039 | |
| 1040 | tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); |
| 1041 | tmp &= ~RDWR_EN_LO_CNT__VALUE; |
| 1042 | tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); |
| 1043 | iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); |
| 1044 | |
| 1045 | /* tCS, tCEA -> CS_SETUP_CNT */ |
| 1046 | cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo, |
| 1047 | (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks, |
| 1048 | 0); |
| 1049 | cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); |
| 1050 | |
| 1051 | tmp = ioread32(denali->reg + CS_SETUP_CNT); |
| 1052 | tmp &= ~CS_SETUP_CNT__VALUE; |
| 1053 | tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); |
| 1054 | iowrite32(tmp, denali->reg + CS_SETUP_CNT); |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1055 | |
| 1056 | return 0; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1057 | } |
| 1058 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1059 | static void denali_reset_banks(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1060 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1061 | u32 irq_status; |
| 1062 | int i; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1063 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1064 | for (i = 0; i < denali->max_banks; i++) { |
| 1065 | denali->active_bank = i; |
| 1066 | |
| 1067 | denali_reset_irq(denali); |
| 1068 | |
| 1069 | iowrite32(DEVICE_RESET__BANK(i), |
| 1070 | denali->reg + DEVICE_RESET); |
| 1071 | |
| 1072 | irq_status = denali_wait_for_irq(denali, |
| 1073 | INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); |
| 1074 | if (!(irq_status & INTR__INT_ACT)) |
| 1075 | break; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1076 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1077 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1078 | dev_dbg(denali->dev, "%d chips connected\n", i); |
| 1079 | denali->max_banks = i; |
| 1080 | } |
| 1081 | |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1082 | static void denali_hw_init(struct denali_nand_info *denali) |
| 1083 | { |
| 1084 | /* |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 1085 | * The REVISION register may not be reliable. Platforms are allowed to |
| 1086 | * override it. |
| 1087 | */ |
| 1088 | if (!denali->revision) |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1089 | denali->revision = swab16(ioread32(denali->reg + REVISION)); |
Masahiro Yamada | 6c71b6f | 2017-09-15 21:43:19 +0900 | [diff] [blame] | 1090 | |
| 1091 | /* |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1092 | * tell driver how many bit controller will skip before writing |
| 1093 | * ECC code in OOB. This is normally used for bad block marker |
| 1094 | */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1095 | denali->oob_skip_bytes = CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES; |
| 1096 | iowrite32(denali->oob_skip_bytes, denali->reg + SPARE_AREA_SKIP_BYTES); |
| 1097 | denali_detect_max_banks(denali); |
| 1098 | iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); |
| 1099 | iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1100 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1101 | iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); |
| 1102 | } |
| 1103 | |
/*
 * Compute the number of ECC bytes per step for a BCH code.
 *
 * Each correctable bit requires as many parity bits as the bit-index
 * width of one code word, i.e. the position of the highest set bit of
 * (step_size * 8).  The Denali controller requires ecc.bytes to be a
 * multiple of 2, so the total is rounded up to whole 16-bit words.
 */
int denali_calc_ecc_bytes(int step_size, int strength)
{
	int codeword_bits = 0;
	unsigned int v = step_size * 8;

	/* codeword_bits = fls(step_size * 8) */
	while (v) {
		codeword_bits++;
		v >>= 1;
	}

	/* round up to 16-bit words, expressed in bytes */
	return 2 * ((strength * codeword_bits + 15) / 16);
}
| 1109 | EXPORT_SYMBOL(denali_calc_ecc_bytes); |
| 1110 | |
| 1111 | static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, |
| 1112 | struct denali_nand_info *denali) |
| 1113 | { |
| 1114 | int oobavail = mtd->oobsize - denali->oob_skip_bytes; |
| 1115 | int ret; |
| 1116 | |
| 1117 | /* |
| 1118 | * If .size and .strength are already set (usually by DT), |
| 1119 | * check if they are supported by this controller. |
| 1120 | */ |
| 1121 | if (chip->ecc.size && chip->ecc.strength) |
| 1122 | return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); |
| 1123 | |
| 1124 | /* |
| 1125 | * We want .size and .strength closest to the chip's requirement |
| 1126 | * unless NAND_ECC_MAXIMIZE is requested. |
| 1127 | */ |
| 1128 | if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { |
| 1129 | ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); |
| 1130 | if (!ret) |
| 1131 | return 0; |
| 1132 | } |
| 1133 | |
| 1134 | /* Max ECC strength is the last thing we can do */ |
| 1135 | return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1136 | } |
| 1137 | |
/* Legacy ECC layout descriptor; eccbytes is filled in by denali_init(). */
static struct nand_ecclayout nand_oob;
| 1139 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1140 | static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, |
| 1141 | struct mtd_oob_region *oobregion) |
| 1142 | { |
| 1143 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 1144 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1145 | |
| 1146 | if (section) |
| 1147 | return -ERANGE; |
| 1148 | |
| 1149 | oobregion->offset = denali->oob_skip_bytes; |
| 1150 | oobregion->length = chip->ecc.total; |
| 1151 | |
| 1152 | return 0; |
| 1153 | } |
| 1154 | |
| 1155 | static int denali_ooblayout_free(struct mtd_info *mtd, int section, |
| 1156 | struct mtd_oob_region *oobregion) |
| 1157 | { |
| 1158 | struct denali_nand_info *denali = mtd_to_denali(mtd); |
| 1159 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1160 | |
| 1161 | if (section) |
| 1162 | return -ERANGE; |
| 1163 | |
| 1164 | oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; |
| 1165 | oobregion->length = mtd->oobsize - oobregion->offset; |
| 1166 | |
| 1167 | return 0; |
| 1168 | } |
| 1169 | |
/* OOB layout callbacks registered with the MTD core in denali_init(). */
static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc = denali_ooblayout_ecc,
	.free = denali_ooblayout_free,
};
| 1174 | |
| 1175 | static int denali_multidev_fixup(struct denali_nand_info *denali) |
| 1176 | { |
| 1177 | struct nand_chip *chip = &denali->nand; |
| 1178 | struct mtd_info *mtd = nand_to_mtd(chip); |
| 1179 | |
| 1180 | /* |
| 1181 | * Support for multi device: |
| 1182 | * When the IP configuration is x16 capable and two x8 chips are |
| 1183 | * connected in parallel, DEVICES_CONNECTED should be set to 2. |
| 1184 | * In this case, the core framework knows nothing about this fact, |
| 1185 | * so we should tell it the _logical_ pagesize and anything necessary. |
| 1186 | */ |
| 1187 | denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); |
| 1188 | |
| 1189 | /* |
| 1190 | * On some SoCs, DEVICES_CONNECTED is not auto-detected. |
| 1191 | * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. |
| 1192 | */ |
| 1193 | if (denali->devs_per_cs == 0) { |
| 1194 | denali->devs_per_cs = 1; |
| 1195 | iowrite32(1, denali->reg + DEVICES_CONNECTED); |
| 1196 | } |
| 1197 | |
| 1198 | if (denali->devs_per_cs == 1) |
| 1199 | return 0; |
| 1200 | |
| 1201 | if (denali->devs_per_cs != 2) { |
| 1202 | dev_err(denali->dev, "unsupported number of devices %d\n", |
| 1203 | denali->devs_per_cs); |
| 1204 | return -EINVAL; |
| 1205 | } |
| 1206 | |
| 1207 | /* 2 chips in parallel */ |
| 1208 | mtd->size <<= 1; |
| 1209 | mtd->erasesize <<= 1; |
| 1210 | mtd->writesize <<= 1; |
| 1211 | mtd->oobsize <<= 1; |
| 1212 | chip->chipsize <<= 1; |
| 1213 | chip->page_shift += 1; |
| 1214 | chip->phys_erase_shift += 1; |
| 1215 | chip->bbt_erase_shift += 1; |
| 1216 | chip->chip_shift += 1; |
| 1217 | chip->pagemask <<= 1; |
| 1218 | chip->ecc.size <<= 1; |
| 1219 | chip->ecc.bytes <<= 1; |
| 1220 | chip->ecc.strength <<= 1; |
| 1221 | denali->oob_skip_bytes <<= 1; |
| 1222 | |
| 1223 | return 0; |
| 1224 | } |
| 1225 | |
Masahiro Yamada | 1d9654d | 2017-08-26 01:12:31 +0900 | [diff] [blame] | 1226 | int denali_init(struct denali_nand_info *denali) |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1227 | { |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1228 | struct nand_chip *chip = &denali->nand; |
| 1229 | struct mtd_info *mtd = nand_to_mtd(chip); |
| 1230 | u32 features = ioread32(denali->reg + FEATURES); |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1231 | int ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1232 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1233 | denali_hw_init(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1234 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1235 | denali_clear_irq_all(denali); |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1236 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1237 | denali_reset_banks(denali); |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1238 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1239 | denali->active_bank = DENALI_INVALID_BANK; |
| 1240 | |
| 1241 | chip->flash_node = dev_of_offset(denali->dev); |
| 1242 | /* Fallback to the default name if DT did not give "label" property */ |
| 1243 | if (!mtd->name) |
| 1244 | mtd->name = "denali-nand"; |
| 1245 | |
| 1246 | chip->select_chip = denali_select_chip; |
| 1247 | chip->read_byte = denali_read_byte; |
| 1248 | chip->write_byte = denali_write_byte; |
| 1249 | chip->read_word = denali_read_word; |
| 1250 | chip->cmd_ctrl = denali_cmd_ctrl; |
| 1251 | chip->dev_ready = denali_dev_ready; |
| 1252 | chip->waitfunc = denali_waitfunc; |
| 1253 | |
| 1254 | if (features & FEATURES__INDEX_ADDR) { |
| 1255 | denali->host_read = denali_indexed_read; |
| 1256 | denali->host_write = denali_indexed_write; |
| 1257 | } else { |
| 1258 | denali->host_read = denali_direct_read; |
| 1259 | denali->host_write = denali_direct_write; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1260 | } |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1261 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1262 | /* clk rate info is needed for setup_data_interface */ |
| 1263 | if (denali->clk_x_rate) |
| 1264 | chip->setup_data_interface = denali_setup_data_interface; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1265 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1266 | ret = nand_scan_ident(mtd, denali->max_banks, NULL); |
| 1267 | if (ret) |
| 1268 | return ret; |
| 1269 | |
| 1270 | if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) |
| 1271 | denali->dma_avail = 1; |
| 1272 | |
| 1273 | if (denali->dma_avail) { |
| 1274 | chip->buf_align = 16; |
| 1275 | if (denali->caps & DENALI_CAP_DMA_64BIT) |
| 1276 | denali->setup_dma = denali_setup_dma64; |
| 1277 | else |
| 1278 | denali->setup_dma = denali_setup_dma32; |
| 1279 | } else { |
| 1280 | chip->buf_align = 4; |
| 1281 | } |
| 1282 | |
| 1283 | chip->options |= NAND_USE_BOUNCE_BUFFER; |
| 1284 | chip->bbt_options |= NAND_BBT_USE_FLASH; |
| 1285 | chip->bbt_options |= NAND_BBT_NO_OOB; |
| 1286 | denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1287 | |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1288 | /* no subpage writes on denali */ |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1289 | chip->options |= NAND_NO_SUBPAGE_WRITE; |
Scott Wood | d396372 | 2015-06-26 19:03:26 -0500 | [diff] [blame] | 1290 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1291 | ret = denali_ecc_setup(mtd, chip, denali); |
| 1292 | if (ret) { |
| 1293 | dev_err(denali->dev, "Failed to setup ECC settings.\n"); |
| 1294 | return ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1295 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1296 | |
| 1297 | dev_dbg(denali->dev, |
| 1298 | "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", |
| 1299 | chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); |
| 1300 | |
| 1301 | iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | |
| 1302 | FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), |
| 1303 | denali->reg + ECC_CORRECTION); |
| 1304 | iowrite32(mtd->erasesize / mtd->writesize, |
| 1305 | denali->reg + PAGES_PER_BLOCK); |
| 1306 | iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, |
| 1307 | denali->reg + DEVICE_WIDTH); |
| 1308 | iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, |
| 1309 | denali->reg + TWO_ROW_ADDR_CYCLES); |
| 1310 | iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); |
| 1311 | iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); |
| 1312 | |
| 1313 | iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); |
| 1314 | iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); |
| 1315 | /* chip->ecc.steps is set by nand_scan_tail(); not available here */ |
| 1316 | iowrite32(mtd->writesize / chip->ecc.size, |
| 1317 | denali->reg + CFG_NUM_DATA_BLOCKS); |
| 1318 | |
| 1319 | mtd_set_ooblayout(mtd, &denali_ooblayout_ops); |
| 1320 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1321 | nand_oob.eccbytes = denali->nand.ecc.bytes; |
| 1322 | denali->nand.ecc.layout = &nand_oob; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1323 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1324 | if (chip->options & NAND_BUSWIDTH_16) { |
| 1325 | chip->read_buf = denali_read_buf16; |
| 1326 | chip->write_buf = denali_write_buf16; |
| 1327 | } else { |
| 1328 | chip->read_buf = denali_read_buf; |
| 1329 | chip->write_buf = denali_write_buf; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1330 | } |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1331 | chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; |
| 1332 | chip->ecc.read_page = denali_read_page; |
| 1333 | chip->ecc.read_page_raw = denali_read_page_raw; |
| 1334 | chip->ecc.write_page = denali_write_page; |
| 1335 | chip->ecc.write_page_raw = denali_write_page_raw; |
| 1336 | chip->ecc.read_oob = denali_read_oob; |
| 1337 | chip->ecc.write_oob = denali_write_oob; |
| 1338 | chip->erase = denali_erase; |
| 1339 | |
| 1340 | ret = denali_multidev_fixup(denali); |
| 1341 | if (ret) |
| 1342 | return ret; |
| 1343 | |
| 1344 | /* |
| 1345 | * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not |
| 1346 | * use devm_kmalloc() because the memory allocated by devm_ does not |
| 1347 | * guarantee DMA-safe alignment. |
| 1348 | */ |
| 1349 | denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); |
| 1350 | if (!denali->buf) |
| 1351 | return -ENOMEM; |
| 1352 | |
| 1353 | ret = nand_scan_tail(mtd); |
| 1354 | if (ret) |
| 1355 | goto free_buf; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1356 | |
Scott Wood | ceee07b | 2016-05-30 13:57:58 -0500 | [diff] [blame] | 1357 | ret = nand_register(0, mtd); |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1358 | if (ret) { |
| 1359 | dev_err(denali->dev, "Failed to register MTD: %d\n", ret); |
| 1360 | goto free_buf; |
| 1361 | } |
| 1362 | return 0; |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1363 | |
Masahiro Yamada | 350d052 | 2017-11-22 02:38:32 +0900 | [diff] [blame] | 1364 | free_buf: |
| 1365 | kfree(denali->buf); |
| 1366 | |
Masahiro Yamada | 65e4145 | 2014-11-13 20:31:50 +0900 | [diff] [blame] | 1367 | return ret; |
Chin Liang See | 3eb3e72 | 2014-09-12 00:42:17 -0500 | [diff] [blame] | 1368 | } |