/*
 * Copyright (C) 2014 Panasonic Corporation
 * Copyright (C) 2013-2014, Altera Corporation <www.altera.com>
 * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>

#include "denali.h"

#define NAND_DEFAULT_TIMINGS	-1

static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;

/*
 * We define a macro here that combines all interrupts this driver uses into
 * a single constant value, for convenience.
 */
#define DENALI_IRQ_ALL	(INTR_STATUS__DMA_CMD_COMP | \
			INTR_STATUS__ECC_TRANSACTION_DONE | \
			INTR_STATUS__ECC_ERR | \
			INTR_STATUS__PROGRAM_FAIL | \
			INTR_STATUS__LOAD_COMP | \
			INTR_STATUS__PROGRAM_COMP | \
			INTR_STATUS__TIME_OUT | \
			INTR_STATUS__ERASE_FAIL | \
			INTR_STATUS__RST_COMP | \
			INTR_STATUS__ERASE_COMP | \
			INTR_STATUS__ECC_UNCOR_ERR | \
			INTR_STATUS__INT_ACT | \
			INTR_STATUS__LOCKED_BLK)

/* indicates whether the internal value for the flash bank is valid */
#define CHIP_SELECT_INVALID	-1

#define SUPPORT_8BITECC		1

/*
 * this helper allows us to convert from an MTD structure to our own
 * device context (denali) structure.
 */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}

/*
 * These constants are defined by the driver to enable common driver
 * configuration options.
 */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43
#define PIPELINE_ACCESS		0x2000

#define DENALI_UNLOCK_START	0x10
#define DENALI_UNLOCK_END	0x11
#define DENALI_LOCK		0x21
#define DENALI_LOCK_TIGHT	0x31
#define DENALI_BUFFER_LOAD	0x60
#define DENALI_BUFFER_WRITE	0x62

#define DENALI_READ	0
#define DENALI_WRITE	0x100

/* types of device accesses. We can issue commands and get status */
#define COMMAND_CYCLE	0
#define ADDR_CYCLE	1
#define STATUS_CYCLE	2

/*
 * this is a helper macro that allows us to
 * format the bank into the proper bits for the controller
 */
#define BANK(x) ((x) << 24)

/* Interrupts are cleared by writing a 1 to the appropriate status bit */
static inline void clear_interrupt(struct denali_nand_info *denali,
				   uint32_t irq_mask)
{
	uint32_t intr_status_reg;

	intr_status_reg = INTR_STATUS(denali->flash_bank);

	writel(irq_mask, denali->flash_reg + intr_status_reg);
}

static uint32_t read_interrupt_status(struct denali_nand_info *denali)
{
	uint32_t intr_status_reg;

	intr_status_reg = INTR_STATUS(denali->flash_bank);

	return readl(denali->flash_reg + intr_status_reg);
}

static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status;

	status = read_interrupt_status(denali);
	clear_interrupt(denali, status);

	denali->irq_status = 0;
}

static void denali_irq_enable(struct denali_nand_info *denali,
			      uint32_t int_mask)
{
	int i;

	for (i = 0; i < denali->max_banks; ++i)
		writel(int_mask, denali->flash_reg + INTR_EN(i));
}

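/*
 * Poll the interrupt status register until one of the interrupts in irq_mask
 * fires or the retry budget (1,000,000 polls of 1 us) is exhausted. Returns
 * the matched status bits, or 0 on timeout.
 */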
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long timeout = 1000000;
	uint32_t intr_status;

	do {
		intr_status = read_interrupt_status(denali) & DENALI_IRQ_ALL;
		if (intr_status & irq_mask) {
			denali->irq_status &= ~irq_mask;
			/* our interrupt was detected */
			break;
		}
		udelay(1);
		timeout--;
	} while (timeout != 0);

	if (timeout == 0) {
		/* timeout */
		printf("Denali timeout with interrupt status %08x\n",
		       read_interrupt_status(denali));
		intr_status = 0;
	}
	return intr_status;
}

/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data. The operation is performed by writing the address value
 * of the command to the device memory followed by the data. This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
		       uint32_t address, uint32_t data)
{
	writel(address, denali->flash_mem + INDEX_CTRL_REG);
	writel(data, denali->flash_mem + INDEX_DATA_REG);
}

/* Perform an indexed read of the device */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	writel(address, denali->flash_mem + INDEX_CTRL_REG);
	*pdata = readl(denali->flash_mem + INDEX_DATA_REG);
}

/*
 * We need to buffer some data for some of the NAND core routines.
 * The following routines manage that buffer.
 */
static void reset_buf(struct denali_nand_info *denali)
{
	denali->buf.head = 0;
	denali->buf.tail = 0;
}

static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	denali->buf.buf[denali->buf.tail++] = byte;
}

/* resets a specific device connected to the core */
static void reset_bank(struct denali_nand_info *denali)
{
	uint32_t irq_status;
	uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;

	clear_interrupts(denali);

	writel(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);

	irq_status = wait_for_irq(denali, irq_mask);
	if (irq_status & INTR_STATUS__TIME_OUT)
		debug("reset bank failed.\n");
}

/* Reset the flash controller */
static uint32_t denali_nand_reset(struct denali_nand_info *denali)
{
	int i;

	for (i = 0; i < denali->max_banks; i++)
		writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
		       denali->flash_reg + INTR_STATUS(i));

	for (i = 0; i < denali->max_banks; i++) {
		writel(1 << i, denali->flash_reg + DEVICE_RESET);
		while (!(readl(denali->flash_reg + INTR_STATUS(i)) &
			 (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
			if (readl(denali->flash_reg + INTR_STATUS(i)) &
			    INTR_STATUS__TIME_OUT)
				debug("NAND Reset operation timed out on bank %d\n",
				      i);
	}

	for (i = 0; i < denali->max_banks; i++)
		writel(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
		       denali->flash_reg + INTR_STATUS(i));

	return 0;
}

/*
 * this routine calculates the ONFI timing values for a given mode and
 * programs the clocking register accordingly. The mode is determined by
 * the get_onfi_nand_para routine.
 */
static void nand_onfi_timing_set(struct denali_nand_info *denali,
				 uint16_t mode)
{
	uint32_t trea[6] = {40, 30, 25, 20, 20, 16};
	uint32_t trp[6] = {50, 25, 17, 15, 12, 10};
	uint32_t treh[6] = {30, 15, 15, 10, 10, 7};
	uint32_t trc[6] = {100, 50, 35, 30, 25, 20};
	uint32_t trhoh[6] = {0, 15, 15, 15, 15, 15};
	uint32_t trloh[6] = {0, 0, 0, 0, 5, 5};
	uint32_t tcea[6] = {100, 45, 30, 25, 25, 25};
	uint32_t tadl[6] = {200, 100, 100, 100, 70, 70};
	uint32_t trhw[6] = {200, 100, 100, 100, 100, 100};
	uint32_t trhz[6] = {200, 100, 100, 100, 100, 100};
	uint32_t twhr[6] = {120, 80, 80, 60, 60, 60};
	uint32_t tcs[6] = {70, 35, 25, 25, 20, 15};

	uint32_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
	uint32_t dv_window = 0;
	uint32_t en_lo, en_hi;
	uint32_t acc_clks;
	uint32_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	en_lo = DIV_ROUND_UP(trp[mode], CLK_X);
	en_hi = DIV_ROUND_UP(treh[mode], CLK_X);
	if ((en_hi * CLK_X) < (treh[mode] + 2))
		en_hi++;

	if ((en_lo + en_hi) * CLK_X < trc[mode])
		en_lo += DIV_ROUND_UP((trc[mode] - (en_lo + en_hi) * CLK_X),
				      CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + trloh[mode];

		data_invalid = data_invalid_rhoh < data_invalid_rloh ?
			       data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	acc_clks = DIV_ROUND_UP(trea[mode], CLK_X);

	while (acc_clks * CLK_X - trea[mode] < 3)
		acc_clks++;

	if (data_invalid - acc_clks * CLK_X < 2)
		debug("%s, Line %d: Warning!\n", __FILE__, __LINE__);

	addr_2_data = DIV_ROUND_UP(tadl[mode], CLK_X);
	re_2_we = DIV_ROUND_UP(trhw[mode], CLK_X);
	re_2_re = DIV_ROUND_UP(trhz[mode], CLK_X);
	we_2_re = DIV_ROUND_UP(twhr[mode], CLK_X);
	cs_cnt = DIV_ROUND_UP((tcs[mode] - trp[mode]), CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (tcea[mode]) {
		while (cs_cnt * CLK_X + trea[mode] < tcea[mode])
			cs_cnt++;
	}

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if (readl(denali->flash_reg + MANUFACTURER_ID) == 0 &&
	    readl(denali->flash_reg + DEVICE_ID) == 0x88)
		acc_clks = 6;

	writel(acc_clks, denali->flash_reg + ACC_CLKS);
	writel(re_2_we, denali->flash_reg + RE_2_WE);
	writel(re_2_re, denali->flash_reg + RE_2_RE);
	writel(we_2_re, denali->flash_reg + WE_2_RE);
	writel(addr_2_data, denali->flash_reg + ADDR_2_DATA);
	writel(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
	writel(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
	writel(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}

/* queries the NAND device to see what ONFI modes it supports. */
static uint32_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;

	/*
	 * we need not do a reset here because the driver has already
	 * reset all the banks before
	 */
	if (!(readl(denali->flash_reg + ONFI_TIMING_MODE) &
	      ONFI_TIMING_MODE__VALUE))
		return -EIO;

	for (i = 5; i > 0; i--) {
		if (readl(denali->flash_reg + ONFI_TIMING_MODE) &
		    (0x01 << i))
			break;
	}

	nand_onfi_timing_set(denali, i);

	/*
	 * By now, all the ONFI devices we know support the page cache
	 * rw feature. So here we enable the pipeline_rw_ahead feature
	 */

	return 0;
}

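/* apply datasheet timing values for the Samsung devices this driver recognizes */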
static void get_samsung_nand_para(struct denali_nand_info *denali,
				  uint8_t device_id)
{
	if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		writel(5, denali->flash_reg + ACC_CLKS);
		writel(20, denali->flash_reg + RE_2_WE);
		writel(12, denali->flash_reg + WE_2_RE);
		writel(14, denali->flash_reg + ADDR_2_DATA);
		writel(3, denali->flash_reg + RDWR_EN_LO_CNT);
		writel(2, denali->flash_reg + RDWR_EN_HI_CNT);
		writel(2, denali->flash_reg + CS_SETUP_CNT);
	}
}

static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	uint32_t tmp;

	/*
	 * Workaround to fix a controller bug which reports a wrong
	 * spare area size for some kinds of Toshiba NAND devices
	 */
	if ((readl(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
	    (readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		writel(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		tmp = readl(denali->flash_reg + DEVICES_CONNECTED) *
			readl(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		writel(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
	}
}

static void get_hynix_nand_para(struct denali_nand_info *denali,
				uint8_t device_id)
{
	uint32_t main_size, spare_size;

	switch (device_id) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		writel(128, denali->flash_reg + PAGES_PER_BLOCK);
		writel(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		writel(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		main_size = 4096 *
			readl(denali->flash_reg + DEVICES_CONNECTED);
		spare_size = 224 *
			readl(denali->flash_reg + DEVICES_CONNECTED);
		writel(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		writel(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		writel(0, denali->flash_reg + DEVICE_WIDTH);
		break;
	default:
		debug("Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
		      "Will use default parameter values instead.\n",
		      device_id);
	}
}

/*
 * determines how many NAND chips are connected to the controller. Note for
 * Intel CE4100 devices we don't support more than one device.
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	uint32_t id[denali->max_banks];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < denali->max_banks; i++) {
		/* issue a READ ID (0x90) command to each bank and read one ID byte */
		index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
		index_addr(denali, MODE_11 | (i << 24) | 1, 0);
		index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);

		if (i == 0) {
			if (!(id[i] & 0x0ff))
				break;
		} else {
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}
}

/*
 * Use the configuration feature register to determine the maximum number of
 * banks that the hardware supports.
 */
static void detect_max_banks(struct denali_nand_info *denali)
{
	uint32_t features = readl(denali->flash_reg + FEATURES);
	/*
	 * Read the revision register, so we can calculate the max_banks
	 * properly: the encoding changed from rev 5.0 to 5.1
	 */
	u32 revision = MAKE_COMPARABLE_REVISION(
				readl(denali->flash_reg + REVISION));

	if (revision < REVISION_5_1)
		denali->max_banks = 2 << (features & FEATURES__N_BANKS);
	else
		denali->max_banks = 1 << (features & FEATURES__N_BANKS);
}

static void detect_partition_feature(struct denali_nand_info *denali)
{
	/*
	 * For the MRST platform, denali->fwblks represents the number of
	 * blocks occupied by firmware. The firmware lives in a protected
	 * partition that the MTD driver has no permission to access, so
	 * let the driver know how many blocks it must not touch.
	 */
	if (readl(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
		if ((readl(denali->flash_reg + PERM_SRC_ID(1)) &
		     PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
			denali->fwblks =
			    ((readl(denali->flash_reg + MIN_MAX_BANK(1)) &
			      MIN_MAX_BANK__MIN_VALUE) *
			     denali->blksperchip)
			    +
			    (readl(denali->flash_reg + MIN_BLK_ADDR(1)) &
			     MIN_BLK_ADDR__VALUE);
		} else {
			denali->fwblks = SPECTRA_START_BLOCK;
		}
	} else {
		denali->fwblks = SPECTRA_START_BLOCK;
	}
}

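/* read the device ID bytes and program the controller's timing and geometry accordingly */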
static uint32_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint32_t id_bytes[8], addr;
	uint8_t maf_id, device_id;
	int i;

	/*
	 * Use the read id method to get the device ID and other params.
	 * For some NAND chips, the controller can't report the correct
	 * device ID by reading from the DEVICE_ID register
	 */
	addr = MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, addr | 0, 0x90);
	index_addr(denali, addr | 1, 0);
	for (i = 0; i < 8; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	maf_id = id_bytes[0];
	device_id = id_bytes[1];

	if (readl(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
	    ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (get_onfi_nand_para(denali))
			return -EIO;
	} else if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali, device_id);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	}

	find_valid_banks(denali);

	detect_partition_feature(denali);

	/*
	 * If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);

	return 0;
}

/*
 * validation function to verify that the controlling software is making
 * a valid request
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	return flash_bank >= 0 && flash_bank < 4;
}

static void denali_irq_init(struct denali_nand_info *denali)
{
	uint32_t int_mask;
	int i;

	/* Disable global interrupts */
	writel(0, denali->flash_reg + GLOBAL_INT_ENABLE);

	int_mask = DENALI_IRQ_ALL;

	/* Clear all status bits */
	for (i = 0; i < denali->max_banks; ++i)
		writel(0xFFFF, denali->flash_reg + INTR_STATUS(i));

	denali_irq_enable(denali, int_mask);
}

/*
 * This helper function sets up the registers for ECC and whether or not
 * the spare area will be transferred.
 */
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
			       bool transfer_spare)
{
	int ecc_en_flag, transfer_spare_flag;

	/* set ECC, transfer spare bits if needed */
	ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
	transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;

	/* Enable spare area/ECC per user's request. */
	writel(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
	/* applicable for MAP01 only */
	writel(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
}

/*
 * sends a pipeline command operation to the controller. See the Denali NAND
 * controller's user guide for more information (section 4.2.3.6).
 */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
				    bool ecc_en, bool transfer_spare,
				    int access_type, int op)
{
	uint32_t addr, cmd, irq_status;
	static uint32_t page_count = 1;

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

	clear_interrupts(denali);

	addr = BANK(denali->flash_bank) | denali->page;

	/* setup the access type */
	cmd = MODE_10 | addr;
	index_addr(denali, cmd, access_type);

	/* setup the pipeline command */
	index_addr(denali, cmd, 0x2000 | op | page_count);

	cmd = MODE_01 | addr;
	writel(cmd, denali->flash_mem + INDEX_CTRL_REG);

	if (op == DENALI_READ) {
		/* wait for command to be accepted */
		irq_status = wait_for_irq(denali, INTR_STATUS__LOAD_COMP);

		if (irq_status == 0)
			return -EIO;
	}

	return 0;
}

/* helper function that simply writes a buffer to the flash */
static int write_data_to_flash_mem(struct denali_nand_info *denali,
				   const uint8_t *buf, int len)
{
	uint32_t *buf32;
	int i;

	/*
	 * verify that the len is a multiple of 4.
	 * see comment in read_data_from_flash_mem()
	 */
	BUG_ON((len % 4) != 0);

	/* write the data to the flash memory */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		writel(*buf32++, denali->flash_mem + INDEX_DATA_REG);
	return i * 4; /* intent is to return the number of bytes written */
}

/* helper function that simply reads a buffer from the flash */
static int read_data_from_flash_mem(struct denali_nand_info *denali,
				    uint8_t *buf, int len)
{
	uint32_t *buf32;
	int i;

	/*
	 * we assume that len will be a multiple of 4; if not, it would be
	 * nice to know about it ASAP rather than have random failures.
	 * This assumption is based on the fact that this function is
	 * designed to read flash pages, which are typically multiples of
	 * 4 bytes.
	 */
	BUG_ON((len % 4) != 0);

	/* transfer the data from the flash */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		*buf32++ = readl(denali->flash_mem + INDEX_DATA_REG);

	return i * 4; /* intent is to return the number of bytes read */
}

/* switch the controller to main-area-only access for the current bank/page */
static void denali_mode_main_access(struct denali_nand_info *denali)
{
	uint32_t addr, cmd;

	addr = BANK(denali->flash_bank) | denali->page;
	cmd = MODE_10 | addr;
	index_addr(denali, cmd, MAIN_ACCESS);
}

/* switch the controller to main + spare access for the current bank/page */
static void denali_mode_main_spare_access(struct denali_nand_info *denali)
{
	uint32_t addr, cmd;

	addr = BANK(denali->flash_bank) | denali->page;
	cmd = MODE_10 | addr;
	index_addr(denali, cmd, MAIN_SPARE_ACCESS);
}

/* writes OOB data to the device */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;
	uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
			    INTR_STATUS__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
				     DENALI_WRITE) == 0) {
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			dev_err(denali->dev, "OOB write failed\n");
			status = -EIO;
		}
	} else {
		printf("unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}

/* reads OOB data from the device */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
	uint32_t irq_status, addr, cmd;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
				     DENALI_READ) == 0) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/*
		 * wait for the command to be accepted. We can always use
		 * the status0 bit, as the mask is identical for each bank.
		 */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			printf("page on OOB timeout %d\n", denali->page);

		/*
		 * We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, cmd, MAIN_ACCESS);
	}
}

/*
 * this function examines buffers to see if they contain data that
 * indicates the buffer is part of an erased region of flash.
 */
static bool is_erased(uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xFF)
			return false;
	return true;
}

/* programs the controller to enable or disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
	writel(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
	readl(denali->flash_reg + DMA_ENABLE);
}

/* sets up the HW to perform the data DMA */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode;
	const int page_count = 1;
	uint64_t addr = (unsigned long)denali->buf.dma_buf;

	flush_dcache_range(addr, addr + sizeof(denali->buf.dma_buf));

/* For Denali controller that is 64 bit bus IP core */
#ifdef CONFIG_SYS_NAND_DENALI_64BIT
	mode = MODE_10 | BANK(denali->flash_bank) | denali->page;

	/* DMA is a three step process */

	/*
	 * 1. setup transfer type, interrupt when complete,
	 *    burst len = 64 bytes, the number of pages
	 */
	index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count);

	/* 2. set memory low address bits 31:0 */
	index_addr(denali, mode, addr);

	/* 3. set memory high address bits 63:32 */
	index_addr(denali, mode, addr >> 32);
#else
	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | (((addr >> 16) & 0xffff) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes */
	index_addr(denali, mode | 0x14000, 0x2400);
#endif
}

/* Common DMA function */
static uint32_t denali_dma_configuration(struct denali_nand_info *denali,
					 uint32_t ops, bool raw_xfer,
					 uint32_t irq_mask, int oob_required)
{
	uint32_t irq_status = 0;

	/* setup_ecc_for_xfer(bool ecc_en, bool transfer_spare) */
	setup_ecc_for_xfer(denali, !raw_xfer, oob_required);

	/* clear any previous interrupt flags */
	clear_interrupts(denali);

	/* enable the DMA */
	denali_enable_dma(denali, true);

	/* setup the DMA */
	denali_setup_dma(denali, ops);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	/*
	 * If an ECC fault happens, we seem to need a delay before turning
	 * off the DMA; otherwise the controller becomes unresponsive.
	 */
	if (irq_status & INTR_STATUS__ECC_UNCOR_ERR)
		udelay(100);

	/* disable the DMA */
	denali_enable_dma(denali, false);

	return irq_status;
}

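/*
 * common page-write helper: copies the caller's buffer (and, for raw
 * transfers, the OOB area) into the DMA buffer and DMAs it to the device.
 */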
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
		      const uint8_t *buf, bool raw_xfer, int oob_required)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;

	denali->status = 0;

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.dma_buf, buf, mtd->writesize);

	/* need extra memcpy for raw transfer */
	if (raw_xfer)
		memcpy(denali->buf.dma_buf + mtd->writesize,
		       chip->oob_poi, mtd->oobsize);

	/* setting up DMA */
	irq_status = denali_dma_configuration(denali, DENALI_WRITE, raw_xfer,
					      irq_mask, oob_required);

	/* if a timeout happens, error out */
	if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) {
		debug("DMA timeout for denali write_page\n");
		denali->status = NAND_STATUS_FAIL;
		return -EIO;
	}

	if (irq_status & INTR_STATUS__LOCKED_BLK) {
		debug("Write failed: block is locked\n");
		denali->status = NAND_STATUS_FAIL;
		return -EIO;
	}
	return 0;
}

/* NAND core entry points */

/*
 * this is the callback that the NAND core calls to write a page. Since
 * writing a page with ECC or without is similar, all the work is done
 * by write_page above.
 */
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			     const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	/*
	 * for regular page writes, we let HW handle all the ECC
	 * data written to the device.
	 */
	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	return write_page(mtd, chip, buf, false, oob_required);
}

/*
 * This is the callback that the NAND core calls to write a page without ECC.
 * raw access is similar to ECC page writes, so all the work is done in the
 * write_page() function above.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 const uint8_t *buf, int oob_required,
				 int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	/*
	 * for raw page writes, we want to disable ECC and simply write
	 * whatever data is in the buffer.
	 */

	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	return write_page(mtd, chip, buf, true, oob_required);
}

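/* callback for the NAND core to write the OOB area of a page */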
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
			    int page)
{
	return write_oob_data(mtd, chip->oob_poi, page);
}

/* a raw read includes the ECC bytes and all of the spare area */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP;

	if (denali->page != page) {
		debug("Missing NAND_CMD_READ0 command\n");
		return -EIO;
	}

	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	/* setting up the DMA where ecc_enable is false */
	irq_status = denali_dma_configuration(denali, DENALI_READ, true,
					      irq_mask, oob_required);

	/* if a timeout happens, error out */
	if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) {
		debug("DMA timeout for denali_read_page_raw\n");
		return -EIO;
	}

	/* split the DMA buffer contents into the destination buffers */
	memcpy(chip->oob_poi, (denali->buf.dma_buf + mtd->writesize),
	       mtd->oobsize);
	memcpy(buf, denali->buf.dma_buf, mtd->writesize);

	return 0;
}

static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP;

	if (denali->page != page) {
		debug("Missing NAND_CMD_READ0 command\n");
		return -EIO;
	}

	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	/* setting up the DMA where ecc_enable is true */
	irq_status = denali_dma_configuration(denali, DENALI_READ, false,
					      irq_mask, oob_required);

	memcpy(buf, denali->buf.dma_buf, mtd->writesize);

	/* check whether any ECC error */
	if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) {
		/*
		 * check whether the ECC error was caused by an erased page,
		 * using read_page_raw
		 */
		debug(" Uncorrected ECC detected\n");
		denali_read_page_raw(mtd, chip, buf, oob_required,
				     denali->page);

		if (is_erased(buf, mtd->writesize) == true &&
		    is_erased(chip->oob_poi, mtd->oobsize) == true) {
			debug(" ECC error caused by erased block\n");
			/* false alarm, return the 0xFF */
		} else {
			return -EBADMSG;
		}
	}
	memcpy(buf, denali->buf.dma_buf, mtd->writesize);
	return 0;
}

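/* callback for the NAND core to read the OOB area of a page */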
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
			   int page)
{
	read_oob_data(mtd, chip->oob_poi, page);

	return 0;
}

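/* read a single byte from the selected bank via a MODE_11 indexed data cycle */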
static uint8_t denali_read_byte(struct mtd_info *mtd)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, result;

	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	index_addr_read_data(denali, addr | 2, &result);
	return (uint8_t)result & 0xFF;
}

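/* read len bytes from the selected bank into buf via MODE_11 indexed data cycles */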
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t i, addr, result;

	/* delay for tR (data transfer from Flash array to data register) */
	udelay(25);

	/* ensure the device has completed; the wait adds extra delay and polling if not */
	wait_for_irq(denali, INTR_STATUS__INT_ACT);

	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	for (i = 0; i < len; i++) {
		index_addr_read_data(denali, (uint32_t)addr | 2, &result);
		write_byte_to_buf(denali, result);
	}
	memcpy(buf, denali->buf.buf, len);
}

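/* select which NAND bank (chip select) subsequent operations will target */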
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->flash_bank = chip;
}

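/* report (and clear) the status of the last program/erase operation to the NAND core */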
static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	int status = denali->status;

	denali->status = 0;

	return status;
}

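/* erase the block that contains the given page */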
static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd, irq_status;

	clear_interrupts(denali);

	/* issue the erase operation for the block containing this page */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
				  INTR_STATUS__ERASE_FAIL);

	if (irq_status & INTR_STATUS__ERASE_FAIL ||
	    irq_status & INTR_STATUS__LOCKED_BLK)
		return NAND_STATUS_FAIL;

	return 0;
}

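/* dispatch commands issued by the NAND core to the controller's indexed interface */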
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		/*
		 * sometimes the ManufactureId read from the register is not
		 * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
		 * so here we send the READID cmd to the NAND instead
		 */
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		index_addr(denali, addr | 1, col & 0xFF);
		if (cmd == NAND_CMD_PARAM)
			udelay(50);
		break;
	case NAND_CMD_RNDOUT:
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		index_addr(denali, addr | 1, col & 0xFF);
		index_addr(denali, addr | 1, col >> 8);
		index_addr(denali, addr | 0, NAND_CMD_RNDOUTSTART);
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	case NAND_CMD_ERASE1:
		/*
		 * supporting block erase only, not multi-block erase, as
		 * that would cross planes and the software would need a
		 * complex calculation to identify the block count for the
		 * cross-plane case
		 */
		denali_erase(mtd, page);
		break;
	case NAND_CMD_ERASE2:
		/* nothing to do here as it was done during NAND_CMD_ERASE1 */
		break;
	case NAND_CMD_UNLOCK1:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr | 0, DENALI_UNLOCK_START);
		break;
	case NAND_CMD_UNLOCK2:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr | 0, DENALI_UNLOCK_END);
		break;
	case NAND_CMD_LOCK:
		addr = MODE_10 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, DENALI_LOCK);
		break;
	default:
		printf(": unsupported command received 0x%x\n", cmd);
		break;
	}
}
/* end NAND core entry points */

/* Initialization code to bring the device up to a known good state */
static void denali_hw_init(struct denali_nand_info *denali)
{
	/*
	 * tell the driver how many bytes the controller will skip before
	 * writing ECC code in the OOB. This is normally used for the bad
	 * block marker
	 */
	writel(CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES,
	       denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	detect_max_banks(denali);
	denali_nand_reset(denali);
	writel(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	writel(CHIP_EN_DONT_CARE__FLAG,
	       denali->flash_reg + CHIP_ENABLE_DONT_CARE);
	writel(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	writel(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	writel(1, denali->flash_reg + ECC_ENABLE);
	denali_nand_timing_set(denali);
	denali_irq_init(denali);
}

static struct nand_ecclayout nand_oob;

int denali_init(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
	int ret;

	denali_hw_init(denali);

	mtd->name = "denali-nand";
	mtd->owner = THIS_MODULE;

	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.read_buf = denali_read_buf;
	denali->nand.waitfunc = denali_waitfunc;

	/*
	 * scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem
	 */
	if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto fail;
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * check whether the flash has a BBT (located at the end of the
	 * flash). As we use NAND_BBT_NO_OOB, the BBT page will start with
	 * bbt_pattern. We will have a mirror pattern too.
	 */
	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
	/*
	 * We are using main + spare with ECC support. As the BBT needs ECC
	 * support, we need to ensure the BBT code doesn't write the BBT
	 * pattern to the OOB. All BBT info will be stored in the data area
	 * with ECC support.
	 */
	denali->nand.bbt_options |= NAND_BBT_NO_OOB;
#endif

	denali->nand.ecc.mode = NAND_ECC_HW;
	denali->nand.ecc.size = CONFIG_NAND_DENALI_ECC_SIZE;

	/* no subpage writes on denali */
	denali->nand.options |= NAND_NO_SUBPAGE_WRITE;

	/*
	 * Tell the driver the ECC strength. This register may already be
	 * set correctly, so we read this value out.
	 */
	denali->nand.ecc.strength = readl(denali->flash_reg + ECC_CORRECTION);
	switch (denali->nand.ecc.size) {
	case 512:
		denali->nand.ecc.bytes =
			(denali->nand.ecc.strength * 13 + 15) / 16 * 2;
		break;
	case 1024:
		denali->nand.ecc.bytes =
			(denali->nand.ecc.strength * 14 + 15) / 16 * 2;
		break;
	default:
		pr_err("Unsupported ECC size\n");
		ret = -EINVAL;
		goto fail;
	}
	nand_oob.eccbytes = denali->nand.ecc.bytes;
	denali->nand.ecc.layout = &nand_oob;

	writel(mtd->erasesize / mtd->writesize,
	       denali->flash_reg + PAGES_PER_BLOCK);
	writel(denali->nand.options & NAND_BUSWIDTH_16 ? 1 : 0,
	       denali->flash_reg + DEVICE_WIDTH);
	writel(mtd->writesize,
	       denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	writel(mtd->oobsize,
	       denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
	if (readl(denali->flash_reg + DEVICES_CONNECTED) == 0)
		writel(1, denali->flash_reg + DEVICES_CONNECTED);

	/* override the default operations */
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;

	if (nand_scan_tail(mtd)) {
		ret = -ENXIO;
		goto fail;
	}

	ret = nand_register(0, mtd);

fail:
	return ret;
}

#ifndef CONFIG_NAND_DENALI_DT
static int __board_nand_init(void)
{
	struct denali_nand_info *denali;

	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
	if (!denali)
		return -ENOMEM;

	/*
	 * In the future, these base addresses should be taken from
	 * Device Tree or platform data.
	 */
	denali->flash_reg = (void __iomem *)CONFIG_SYS_NAND_REGS_BASE;
	denali->flash_mem = (void __iomem *)CONFIG_SYS_NAND_DATA_BASE;

	return denali_init(denali);
}

void board_nand_init(void)
{
	if (__board_nand_init() < 0)
		pr_warn("Failed to initialize Denali NAND controller.\n");
}
#endif