// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * Derived from drivers/mtd/nand/spi/micron.c
 * Copyright (c) 2016-2017 Micron Technology, Inc.
 */

#ifndef __UBOOT__
#include <malloc.h>
#include <linux/device.h>
#include <linux/kernel.h>
#endif
#include <linux/mtd/spinand.h>

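/*
 * Both chip families report a coarse ECC state in the STATUS_ECC_MASK
 * field of the status register. A finer bitflip count (the ECCSE bits)
 * is available from the extended status register 0xf0, read with a
 * GET FEATURE operation in the ecc_get_status() callbacks below.
 */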
#define SPINAND_MFR_GIGADEVICE			0xC8
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS	(1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS	(3 << 4)

#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS	(1 << 4)
#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS	(3 << 4)

#define GD5FXGQXXEXXG_REG_STATUS2		0xf0

/* Q4 devices, QUADIO: Dummy bytes valid for 1 and 2 GBit variants */
static SPINAND_OP_VARIANTS(gd5fxgq4_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

/* Q5 devices, QUADIO: Dummy bytes only valid for 1 GBit variants */
static SPINAND_OP_VARIANTS(gd5f1gq5_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));

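/*
 * OOB layout: of the 128 spare bytes per 2 KiB page, the upper 64 bytes
 * hold the on-die ECC data and the lower 64 bytes are free, except for
 * byte 0 which is reserved for the bad block marker.
 */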
static int gd5fxgqxxexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	region->offset = 64;
	region->length = 64;

	return 0;
}

static int gd5fxgqxxexxg_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 1 byte for the BBM. */
	region->offset = 1;
	region->length = 63;

	return 0;
}

static int gd5fxgq4xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/*
		 * Read the status2 register to determine a more fine-grained
		 * bit error status.
		 */
		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		/*
		 * 4 ... 7 bits are flipped (1..4 can't be detected, so
		 * report the maximum of 4 in this case).
		 */
		/* bits sorted this way (3...0): ECCS1, ECCS0, ECCSE1, ECCSE0 */
		return ((status & STATUS_ECC_MASK) >> 2) |
		       ((status2 & STATUS_ECC_MASK) >> 4);

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

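/*
 * On the Q5 parts the ECCSE field of the extended status register encodes
 * the number of corrected bitflips minus one, hence the "+ 1" on the value
 * returned below.
 */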
static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
		/*
		 * Read the status2 register to determine a more fine-grained
		 * bit error status.
		 */
		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		/*
		 * 1 ... 4 bits are flipped (and corrected).
		 */
		/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static const struct mtd_ooblayout_ops gd5fxgqxxexxg_ooblayout = {
	.ecc = gd5fxgqxxexxg_ooblayout_ecc,
	.rfree = gd5fxgqxxexxg_ooblayout_free,
};

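/*
 * Both supported chips are 1 Gbit devices: 2 KiB pages with 128 bytes of
 * OOB, 64 pages per block, 1024 blocks, and a single plane/LUN/target.
 * The Q4 variant requires 8-bit ECC per 512-byte step, the Q5 variant 4-bit.
 */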
static const struct spinand_info gigadevice_spinand_table[] = {
	SPINAND_INFO("GD5F1GQ4UExxG",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xd1),
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&gd5fxgq4_read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
				     gd5fxgq4xexxg_ecc_get_status)),
	SPINAND_INFO("GD5F1GQ5UExxG",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x51),
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
				     gd5fxgq5xexxg_ecc_get_status)),
};

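/* No GigaDevice-specific manufacturer hooks are required. */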
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
};

const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
	.id = SPINAND_MFR_GIGADEVICE,
	.name = "GigaDevice",
	.chips = gigadevice_spinand_table,
	.nchips = ARRAY_SIZE(gigadevice_spinand_table),
	.ops = &gigadevice_spinand_manuf_ops,
};