blob: cfae5dcbda0e1eada258be13405aa3f53ced18b8 [file] [log] [blame]
Stefan Roese10e8bf82014-11-07 12:37:49 +01001/*
2 * Copyright (C) 2012 Altera Corporation <www.altera.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * - Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * - Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * - Neither the name of the Altera Corporation nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <common.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060029#include <log.h>
Stefan Roese10e8bf82014-11-07 12:37:49 +010030#include <asm/io.h>
Vignesh Raghavendraffab2122020-01-27 10:36:40 +053031#include <dma.h>
Simon Glasscd93d622020-05-10 11:40:13 -060032#include <linux/bitops.h>
Simon Glassc05ed002020-05-10 11:40:11 -060033#include <linux/delay.h>
Masahiro Yamada1221ce42016-09-21 11:28:55 +090034#include <linux/errno.h>
Marek Vasut26da6352016-04-27 23:18:55 +020035#include <wait_bit.h>
Vignesh R2372e142016-07-06 10:20:56 +053036#include <spi.h>
Vignesh Raghavendrad6407722020-01-27 10:36:39 +053037#include <spi-mem.h>
Vignesh Raaa21d32018-01-24 10:44:07 +053038#include <malloc.h>
Stefan Roese10e8bf82014-11-07 12:37:49 +010039#include "cadence_qspi.h"
40
T Karthik Reddy248fe9f2022-05-12 04:05:34 -060041__weak void cadence_qspi_apb_enable_linear_mode(bool enable)
42{
43 return;
44}
45
Stefan Roese10e8bf82014-11-07 12:37:49 +010046void cadence_qspi_apb_controller_enable(void *reg_base)
47{
48 unsigned int reg;
49 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +000050 reg |= CQSPI_REG_CONFIG_ENABLE;
Stefan Roese10e8bf82014-11-07 12:37:49 +010051 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese10e8bf82014-11-07 12:37:49 +010052}
53
54void cadence_qspi_apb_controller_disable(void *reg_base)
55{
56 unsigned int reg;
57 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +000058 reg &= ~CQSPI_REG_CONFIG_ENABLE;
Stefan Roese10e8bf82014-11-07 12:37:49 +010059 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese10e8bf82014-11-07 12:37:49 +010060}
61
Vignesh Raghavendraffab2122020-01-27 10:36:40 +053062void cadence_qspi_apb_dac_mode_enable(void *reg_base)
63{
64 unsigned int reg;
65
66 reg = readl(reg_base + CQSPI_REG_CONFIG);
67 reg |= CQSPI_REG_CONFIG_DIRECT;
68 writel(reg, reg_base + CQSPI_REG_CONFIG);
69}
70
Pratyush Yadav38b08522021-06-26 00:47:09 +053071static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
72 bool dtr)
73{
74 unsigned int dummy_clk;
75
Marek Vasutc2e03632021-09-14 05:21:48 +020076 if (!op->dummy.nbytes || !op->dummy.buswidth)
77 return 0;
78
Pratyush Yadav38b08522021-06-26 00:47:09 +053079 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
80 if (dtr)
81 dummy_clk /= 2;
82
83 return dummy_clk;
84}
85
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -060086static u32 cadence_qspi_calc_rdreg(struct cadence_spi_priv *priv)
Pratyush Yadav38b08522021-06-26 00:47:09 +053087{
88 u32 rdreg = 0;
89
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -060090 rdreg |= priv->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
91 rdreg |= priv->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
92 rdreg |= priv->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
Pratyush Yadav38b08522021-06-26 00:47:09 +053093
94 return rdreg;
95}
96
97static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
98{
99 switch (buswidth) {
100 case 0:
101 case 1:
102 return CQSPI_INST_TYPE_SINGLE;
103
104 case 2:
105 return CQSPI_INST_TYPE_DUAL;
106
107 case 4:
108 return CQSPI_INST_TYPE_QUAD;
109
110 case 8:
111 return CQSPI_INST_TYPE_OCTAL;
112
113 default:
114 return -ENOTSUPP;
115 }
116}
117
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600118static int cadence_qspi_set_protocol(struct cadence_spi_priv *priv,
Pratyush Yadav38b08522021-06-26 00:47:09 +0530119 const struct spi_mem_op *op)
120{
121 int ret;
122
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600123 priv->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
Pratyush Yadav38b08522021-06-26 00:47:09 +0530124
125 ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
126 if (ret < 0)
127 return ret;
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600128 priv->inst_width = ret;
Pratyush Yadav38b08522021-06-26 00:47:09 +0530129
130 ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
131 if (ret < 0)
132 return ret;
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600133 priv->addr_width = ret;
Pratyush Yadav38b08522021-06-26 00:47:09 +0530134
135 ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
136 if (ret < 0)
137 return ret;
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600138 priv->data_width = ret;
Pratyush Yadav38b08522021-06-26 00:47:09 +0530139
140 return 0;
141}
142
Stefan Roese10e8bf82014-11-07 12:37:49 +0100143/* Return 1 if idle, otherwise return 0 (busy). */
144static unsigned int cadence_qspi_wait_idle(void *reg_base)
145{
146 unsigned int start, count = 0;
147 /* timeout in unit of ms */
148 unsigned int timeout = 5000;
149
150 start = get_timer(0);
151 for ( ; get_timer(start) < timeout ; ) {
152 if (CQSPI_REG_IS_IDLE(reg_base))
153 count++;
154 else
155 count = 0;
156 /*
157 * Ensure the QSPI controller is in true idle state after
158 * reading back the same idle status consecutively
159 */
160 if (count >= CQSPI_POLL_IDLE_RETRY)
161 return 1;
162 }
163
164 /* Timeout, still in busy mode. */
165 printf("QSPI: QSPI is still busy after poll for %d times.\n",
166 CQSPI_REG_RETRY);
167 return 0;
168}
169
170void cadence_qspi_apb_readdata_capture(void *reg_base,
171 unsigned int bypass, unsigned int delay)
172{
173 unsigned int reg;
174 cadence_qspi_apb_controller_disable(reg_base);
175
Phil Edworthydb37cc92016-11-29 12:58:29 +0000176 reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100177
178 if (bypass)
Phil Edworthydb37cc92016-11-29 12:58:29 +0000179 reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100180 else
Phil Edworthydb37cc92016-11-29 12:58:29 +0000181 reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100182
Phil Edworthydb37cc92016-11-29 12:58:29 +0000183 reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
184 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100185
Phil Edworthydb37cc92016-11-29 12:58:29 +0000186 reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
187 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100188
Phil Edworthydb37cc92016-11-29 12:58:29 +0000189 writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100190
191 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100192}
193
194void cadence_qspi_apb_config_baudrate_div(void *reg_base,
195 unsigned int ref_clk_hz, unsigned int sclk_hz)
196{
197 unsigned int reg;
198 unsigned int div;
199
200 cadence_qspi_apb_controller_disable(reg_base);
201 reg = readl(reg_base + CQSPI_REG_CONFIG);
202 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
203
Phil Edworthy32068c42016-11-29 12:58:27 +0000204 /*
205 * The baud_div field in the config reg is 4 bits, and the ref clock is
206 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
207 * SPI clock rate is less than or equal to the requested clock rate.
208 */
209 div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100210
Chin Liang See54058172016-08-07 22:50:40 +0800211 /* ensure the baud rate doesn't exceed the max value */
212 if (div > CQSPI_REG_CONFIG_BAUD_MASK)
213 div = CQSPI_REG_CONFIG_BAUD_MASK;
214
Phil Edworthy0ceb4d92016-11-29 12:58:28 +0000215 debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
216 ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));
217
Chin Liang See54058172016-08-07 22:50:40 +0800218 reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100219 writel(reg, reg_base + CQSPI_REG_CONFIG);
220
221 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100222}
223
Phil Edworthy7d403f22016-11-29 12:58:31 +0000224void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100225{
226 unsigned int reg;
227
228 cadence_qspi_apb_controller_disable(reg_base);
229 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthydb37cc92016-11-29 12:58:29 +0000230 reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100231
Phil Edworthy7d403f22016-11-29 12:58:31 +0000232 if (mode & SPI_CPOL)
Phil Edworthydb37cc92016-11-29 12:58:29 +0000233 reg |= CQSPI_REG_CONFIG_CLK_POL;
Phil Edworthy7d403f22016-11-29 12:58:31 +0000234 if (mode & SPI_CPHA)
Phil Edworthydb37cc92016-11-29 12:58:29 +0000235 reg |= CQSPI_REG_CONFIG_CLK_PHA;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100236
237 writel(reg, reg_base + CQSPI_REG_CONFIG);
238
239 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100240}
241
242void cadence_qspi_apb_chipselect(void *reg_base,
243 unsigned int chip_select, unsigned int decoder_enable)
244{
245 unsigned int reg;
246
247 cadence_qspi_apb_controller_disable(reg_base);
248
249 debug("%s : chipselect %d decode %d\n", __func__, chip_select,
250 decoder_enable);
251
252 reg = readl(reg_base + CQSPI_REG_CONFIG);
253 /* docoder */
254 if (decoder_enable) {
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000255 reg |= CQSPI_REG_CONFIG_DECODE;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100256 } else {
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000257 reg &= ~CQSPI_REG_CONFIG_DECODE;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100258 /* Convert CS if without decoder.
259 * CS0 to 4b'1110
260 * CS1 to 4b'1101
261 * CS2 to 4b'1011
262 * CS3 to 4b'0111
263 */
264 chip_select = 0xF & ~(1 << chip_select);
265 }
266
267 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
268 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
269 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
270 << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
271 writel(reg, reg_base + CQSPI_REG_CONFIG);
272
273 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100274}
275
276void cadence_qspi_apb_delay(void *reg_base,
277 unsigned int ref_clk, unsigned int sclk_hz,
278 unsigned int tshsl_ns, unsigned int tsd2d_ns,
279 unsigned int tchsh_ns, unsigned int tslch_ns)
280{
281 unsigned int ref_clk_ns;
282 unsigned int sclk_ns;
283 unsigned int tshsl, tchsh, tslch, tsd2d;
284 unsigned int reg;
285
286 cadence_qspi_apb_controller_disable(reg_base);
287
288 /* Convert to ns. */
Phil Edworthy22e63ff2016-11-29 12:58:33 +0000289 ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100290
291 /* Convert to ns. */
Phil Edworthy22e63ff2016-11-29 12:58:33 +0000292 sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100293
Phil Edworthy22e63ff2016-11-29 12:58:33 +0000294 /* The controller adds additional delay to that programmed in the reg */
295 if (tshsl_ns >= sclk_ns + ref_clk_ns)
296 tshsl_ns -= sclk_ns + ref_clk_ns;
297 if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
298 tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
299 tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
300 tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
301 tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
302 tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100303
304 reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
305 << CQSPI_REG_DELAY_TSHSL_LSB);
306 reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
307 << CQSPI_REG_DELAY_TCHSH_LSB);
308 reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
309 << CQSPI_REG_DELAY_TSLCH_LSB);
310 reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
311 << CQSPI_REG_DELAY_TSD2D_LSB);
312 writel(reg, reg_base + CQSPI_REG_DELAY);
313
314 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100315}
316
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600317void cadence_qspi_apb_controller_init(struct cadence_spi_priv *priv)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100318{
319 unsigned reg;
320
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600321 cadence_qspi_apb_controller_disable(priv->regbase);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100322
323 /* Configure the device size and address bytes */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600324 reg = readl(priv->regbase + CQSPI_REG_SIZE);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100325 /* Clear the previous value */
326 reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
327 reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600328 reg |= (priv->page_size << CQSPI_REG_SIZE_PAGE_LSB);
329 reg |= (priv->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
330 writel(reg, priv->regbase + CQSPI_REG_SIZE);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100331
332 /* Configure the remap address register, no remap */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600333 writel(0, priv->regbase + CQSPI_REG_REMAP);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100334
Vikas Manochac0535c02015-07-02 18:29:43 -0700335 /* Indirect mode configurations */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600336 writel(priv->fifo_depth / 2, priv->regbase + CQSPI_REG_SRAMPARTITION);
Vikas Manochac0535c02015-07-02 18:29:43 -0700337
Stefan Roese10e8bf82014-11-07 12:37:49 +0100338 /* Disable all interrupts */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600339 writel(0, priv->regbase + CQSPI_REG_IRQMASK);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100340
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600341 cadence_qspi_apb_controller_enable(priv->regbase);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100342}
343
T Karthik Reddycf553bf2022-05-12 04:05:32 -0600344int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100345{
346 unsigned int retry = CQSPI_REG_RETRY;
347
348 /* Write the CMDCTRL without start execution. */
349 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
350 /* Start execute */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000351 reg |= CQSPI_REG_CMDCTRL_EXECUTE;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100352 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
353
354 while (retry--) {
355 reg = readl(reg_base + CQSPI_REG_CMDCTRL);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000356 if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100357 break;
358 udelay(1);
359 }
360
361 if (!retry) {
362 printf("QSPI: flash command execution timeout\n");
363 return -EIO;
364 }
365
366 /* Polling QSPI idle status. */
367 if (!cadence_qspi_wait_idle(reg_base))
368 return -EIO;
369
370 return 0;
371}
372
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600373static int cadence_qspi_setup_opcode_ext(struct cadence_spi_priv *priv,
Pratyush Yadav38b08522021-06-26 00:47:09 +0530374 const struct spi_mem_op *op,
375 unsigned int shift)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100376{
377 unsigned int reg;
Pratyush Yadav38b08522021-06-26 00:47:09 +0530378 u8 ext;
379
380 if (op->cmd.nbytes != 2)
381 return -EINVAL;
382
383 /* Opcode extension is the LSB. */
384 ext = op->cmd.opcode & 0xff;
385
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600386 reg = readl(priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530387 reg &= ~(0xff << shift);
388 reg |= ext << shift;
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600389 writel(reg, priv->regbase + CQSPI_REG_OP_EXT_LOWER);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530390
391 return 0;
392}
393
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600394static int cadence_qspi_enable_dtr(struct cadence_spi_priv *priv,
Pratyush Yadav38b08522021-06-26 00:47:09 +0530395 const struct spi_mem_op *op,
396 unsigned int shift,
397 bool enable)
398{
399 unsigned int reg;
400 int ret;
401
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600402 reg = readl(priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530403
404 if (enable) {
405 reg |= CQSPI_REG_CONFIG_DTR_PROTO;
406 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
407
408 /* Set up command opcode extension. */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600409 ret = cadence_qspi_setup_opcode_ext(priv, op, shift);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530410 if (ret)
411 return ret;
412 } else {
413 reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
414 reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
415 }
416
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600417 writel(reg, priv->regbase + CQSPI_REG_CONFIG);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530418
419 return 0;
420}
421
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600422int cadence_qspi_apb_command_read_setup(struct cadence_spi_priv *priv,
Pratyush Yadav38b08522021-06-26 00:47:09 +0530423 const struct spi_mem_op *op)
424{
425 int ret;
426 unsigned int reg;
427
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600428 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530429 if (ret)
430 return ret;
431
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600432 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
433 priv->dtr);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530434 if (ret)
435 return ret;
436
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600437 reg = cadence_qspi_calc_rdreg(priv);
438 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530439
440 return 0;
441}
442
/*
 * For command RDID, RDSR.
 *
 * Execute a STIG read of up to CQSPI_STIG_DATA_LEN_MAX bytes and copy the
 * result from the command-read data registers into op->data.buf.in.
 * Returns 0 on success, -EINVAL for bad arguments, -ENOTSUPP if the dummy
 * phase is too long, or the error from command execution.
 */
int cadence_qspi_apb_command_read(struct cadence_spi_priv *priv,
				  const struct spi_mem_op *op)
{
	void *reg_base = priv->regbase;
	unsigned int reg;
	unsigned int read_len;
	int status;
	unsigned int rxlen = op->data.nbytes;
	void *rxbuf = op->data.buf.in;
	unsigned int dummy_clk;
	u8 opcode;

	if (rxlen > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		printf("QSPI: Invalid input arguments rxlen %u\n", rxlen);
		return -EINVAL;
	}

	/* In DTR mode the high byte of the 16-bit opcode is the command. */
	if (priv->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	/* Set up dummy cycles. */
	dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -ENOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
	if (status != 0)
		return status;

	/* Up to 8 bytes arrive in the LOWER/UPPER data registers. */
	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (rxlen > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}
501
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600502int cadence_qspi_apb_command_write_setup(struct cadence_spi_priv *priv,
Pratyush Yadav38b08522021-06-26 00:47:09 +0530503 const struct spi_mem_op *op)
504{
505 int ret;
506 unsigned int reg;
507
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600508 ret = cadence_qspi_set_protocol(priv, op);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530509 if (ret)
510 return ret;
511
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600512 ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_STIG_LSB,
513 priv->dtr);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530514 if (ret)
515 return ret;
516
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600517 reg = cadence_qspi_calc_rdreg(priv);
518 writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);
Pratyush Yadav38b08522021-06-26 00:47:09 +0530519
520 return 0;
521}
522
/*
 * For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc.
 *
 * Execute a STIG write of up to CQSPI_STIG_DATA_LEN_MAX bytes. When the op
 * carries no data, its address bytes are sent as the payload instead (in
 * SPI bus order). Returns 0 on success, -EINVAL for an oversized payload,
 * or the error from command execution.
 */
int cadence_qspi_apb_command_write(struct cadence_spi_priv *priv,
				   const struct spi_mem_op *op)
{
	unsigned int reg = 0;
	unsigned int wr_data;
	unsigned int wr_len;
	unsigned int txlen = op->data.nbytes;
	const void *txbuf = op->data.buf.out;
	void *reg_base = priv->regbase;
	u32 addr;
	u8 opcode;

	/* Reorder address to SPI bus order if only transferring address */
	if (!txlen) {
		addr = cpu_to_be32(op->addr.val);
		/* A 3-byte address occupies the low 3 bytes after the shift. */
		if (op->addr.nbytes == 3)
			addr >>= 8;
		txbuf = &addr;
		txlen = op->addr.nbytes;
	}

	if (txlen > CQSPI_STIG_DATA_LEN_MAX) {
		printf("QSPI: Invalid input arguments txlen %u\n", txlen);
		return -EINVAL;
	}

	/* In DTR mode the high byte of the 16-bit opcode is the command. */
	if (priv->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		/* First 4 bytes go into the LOWER data register. */
		wr_len = txlen > 4 ? 4 : txlen;
		memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, reg_base +
			CQSPI_REG_CMDWRITEDATALOWER);

		/* Remaining bytes (5..8) go into the UPPER data register. */
		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, reg_base +
				CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
}
580
/*
 * Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes)
 *
 * Program the controller for an indirect/DAC read described by @op:
 * protocol widths, DTR mode, trigger address, read instruction (opcode,
 * transfer types, dummy cycles), start address and address size.
 * Returns 0 on success or a negative error code.
 */
int cadence_qspi_apb_read_setup(struct cadence_spi_priv *priv,
				const struct spi_mem_op *op)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int dummy_clk;
	unsigned int dummy_bytes = op->dummy.nbytes;
	int ret;
	u8 opcode;

	ret = cadence_qspi_set_protocol(priv, op);
	if (ret)
		return ret;

	ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_READ_LSB,
				      priv->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(priv->trigger_address,
	       priv->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	if (priv->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	rd_reg |= cadence_qspi_calc_rdreg(priv);

	writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	if (dummy_bytes) {
		/* Convert to clock cycles. */
		dummy_clk = cadence_qspi_calc_dummy(op, priv->dtr);

		if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
			return -ENOTSUPP;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, priv->regbase + CQSPI_REG_RD_INSTR);

	/* set device size: the field encodes (address bytes - 1) */
	reg = readl(priv->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, priv->regbase + CQSPI_REG_SIZE);
	return 0;
}
637
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600638static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_priv *priv)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100639{
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600640 u32 reg = readl(priv->regbase + CQSPI_REG_SDRAMLEVEL);
Marek Vasut5a824c42016-04-27 23:38:05 +0200641 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
642 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
643}
Stefan Roese10e8bf82014-11-07 12:37:49 +0100644
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600645static int cadence_qspi_wait_for_data(struct cadence_spi_priv *priv)
Marek Vasut5a824c42016-04-27 23:38:05 +0200646{
647 unsigned int timeout = 10000;
648 u32 reg;
649
650 while (timeout--) {
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600651 reg = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut5a824c42016-04-27 23:38:05 +0200652 if (reg)
653 return reg;
654 udelay(1);
655 }
656
657 return -ETIMEDOUT;
658}
659
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530660static int
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600661cadence_qspi_apb_indirect_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530662 unsigned int n_rx, u8 *rxbuf)
Marek Vasut5a824c42016-04-27 23:38:05 +0200663{
664 unsigned int remaining = n_rx;
665 unsigned int bytes_to_read = 0;
666 int ret;
667
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600668 writel(n_rx, priv->regbase + CQSPI_REG_INDIRECTRDBYTES);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100669
670 /* Start the indirect read transfer */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000671 writel(CQSPI_REG_INDIRECTRD_START,
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600672 priv->regbase + CQSPI_REG_INDIRECTRD);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100673
Marek Vasut5a824c42016-04-27 23:38:05 +0200674 while (remaining > 0) {
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600675 ret = cadence_qspi_wait_for_data(priv);
Marek Vasut5a824c42016-04-27 23:38:05 +0200676 if (ret < 0) {
677 printf("Indirect write timed out (%i)\n", ret);
678 goto failrd;
679 }
Stefan Roese10e8bf82014-11-07 12:37:49 +0100680
Marek Vasut5a824c42016-04-27 23:38:05 +0200681 bytes_to_read = ret;
682
683 while (bytes_to_read != 0) {
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600684 bytes_to_read *= priv->fifo_width;
Marek Vasut5a824c42016-04-27 23:38:05 +0200685 bytes_to_read = bytes_to_read > remaining ?
686 remaining : bytes_to_read;
Goldschmidt Simon948ad4f2018-01-24 10:44:05 +0530687 /*
688 * Handle non-4-byte aligned access to avoid
689 * data abort.
690 */
691 if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600692 readsb(priv->ahbbase, rxbuf, bytes_to_read);
Goldschmidt Simon948ad4f2018-01-24 10:44:05 +0530693 else
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600694 readsl(priv->ahbbase, rxbuf,
Goldschmidt Simon948ad4f2018-01-24 10:44:05 +0530695 bytes_to_read >> 2);
696 rxbuf += bytes_to_read;
Marek Vasut5a824c42016-04-27 23:38:05 +0200697 remaining -= bytes_to_read;
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600698 bytes_to_read = cadence_qspi_get_rd_sram_level(priv);
Marek Vasut5a824c42016-04-27 23:38:05 +0200699 }
700 }
701
702 /* Check indirect done status */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600703 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Álvaro Fernández Rojas48263502018-01-23 17:14:55 +0100704 CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
Marek Vasut5a824c42016-04-27 23:38:05 +0200705 if (ret) {
706 printf("Indirect read completion error (%i)\n", ret);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100707 goto failrd;
708 }
709
710 /* Clear indirect completion status */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000711 writel(CQSPI_REG_INDIRECTRD_DONE,
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600712 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut5a824c42016-04-27 23:38:05 +0200713
Marek Vasut846d1d92021-09-14 05:22:31 +0200714 /* Check indirect done status */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600715 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTRD,
Marek Vasut846d1d92021-09-14 05:22:31 +0200716 CQSPI_REG_INDIRECTRD_DONE, 0, 10, 0);
717 if (ret) {
718 printf("Indirect read clear completion error (%i)\n", ret);
719 goto failrd;
720 }
721
Stefan Roese10e8bf82014-11-07 12:37:49 +0100722 return 0;
723
724failrd:
725 /* Cancel the indirect read */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000726 writel(CQSPI_REG_INDIRECTRD_CANCEL,
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600727 priv->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut5a824c42016-04-27 23:38:05 +0200728 return ret;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100729}
730
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600731int cadence_qspi_apb_read_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530732 const struct spi_mem_op *op)
733{
Vignesh Raghavendra0f247842019-12-05 15:46:06 +0530734 u64 from = op->addr.val;
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530735 void *buf = op->data.buf.in;
736 size_t len = op->data.nbytes;
737
T Karthik Reddy248fe9f2022-05-12 04:05:34 -0600738 if (CONFIG_IS_ENABLED(ARCH_VERSAL))
739 cadence_qspi_apb_enable_linear_mode(true);
740
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600741 if (priv->use_dac_mode && (from + len < priv->ahbsize)) {
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530742 if (len < 256 ||
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600743 dma_memcpy(buf, priv->ahbbase + from, len) < 0) {
744 memcpy_fromio(buf, priv->ahbbase + from, len);
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530745 }
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600746 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530747 return -EIO;
748 return 0;
749 }
750
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600751 return cadence_qspi_apb_indirect_read_execute(priv, len, buf);
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530752}
753
/*
 * Program the controller for a write: opcode + address (3/4 bytes).
 *
 * Configures protocol/DTR settings, the indirect trigger address, the
 * write and read instruction registers, the indirect write start
 * address and the address-byte count for the spi-mem op @op.
 *
 * Returns 0 on success or a negative error code from the protocol/DTR
 * setup helpers.
 */
int cadence_qspi_apb_write_setup(struct cadence_spi_priv *priv,
				 const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	u8 opcode;

	/* Derive instruction/address/data bus widths from the op. */
	ret = cadence_qspi_set_protocol(priv, op);
	if (ret)
		return ret;

	/* Program (or clear) the DTR opcode extension for writes. */
	ret = cadence_qspi_enable_dtr(priv, op, CQSPI_REG_OP_EXT_WRITE_LSB,
				      priv->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(priv->trigger_address,
	       priv->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/*
	 * Configure the opcode. In DTR mode the 16-bit command is sent
	 * as <opcode><extension>, so the real opcode is the upper byte.
	 */
	if (priv->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= priv->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= priv->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, priv->regbase + CQSPI_REG_WR_INSTR);

	/* Keep the read instruction register consistent with the protocol. */
	reg = cadence_qspi_calc_rdreg(priv);
	writel(reg, priv->regbase + CQSPI_REG_RD_INSTR);

	writel(op->addr.val, priv->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	if (priv->dtr) {
		/*
		 * Some flashes like the cypress Semper flash expect a 4-byte
		 * dummy address with the Read SR command in DTR mode, but this
		 * controller does not support sending address with the Read SR
		 * command. So, disable write completion polling on the
		 * controller's side. spi-nor will take care of polling the
		 * status register.
		 */
		reg = readl(priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, priv->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	/* Number of address bytes, encoded as n-1 in the size register. */
	reg = readl(priv->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, priv->regbase + CQSPI_REG_SIZE);
	return 0;
}
811
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530812static int
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600813cadence_qspi_apb_indirect_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530814 unsigned int n_tx, const u8 *txbuf)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100815{
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600816 unsigned int page_size = priv->page_size;
Marek Vasut26da6352016-04-27 23:18:55 +0200817 unsigned int remaining = n_tx;
Vignesh Raaa21d32018-01-24 10:44:07 +0530818 const u8 *bb_txbuf = txbuf;
819 void *bounce_buf = NULL;
Marek Vasut26da6352016-04-27 23:18:55 +0200820 unsigned int write_bytes;
821 int ret;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100822
Vignesh Raaa21d32018-01-24 10:44:07 +0530823 /*
824 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
825 * aborts
826 */
827 if ((uintptr_t)txbuf % 4) {
828 bounce_buf = malloc(n_tx);
829 if (!bounce_buf)
830 return -ENOMEM;
831 memcpy(bounce_buf, txbuf, n_tx);
832 bb_txbuf = bounce_buf;
833 }
834
Stefan Roese10e8bf82014-11-07 12:37:49 +0100835 /* Configure the indirect read transfer bytes */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600836 writel(n_tx, priv->regbase + CQSPI_REG_INDIRECTWRBYTES);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100837
838 /* Start the indirect write transfer */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000839 writel(CQSPI_REG_INDIRECTWR_START,
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600840 priv->regbase + CQSPI_REG_INDIRECTWR);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100841
Pratyush Yadava6903aa2021-06-26 00:47:08 +0530842 /*
843 * Some delay is required for the above bit to be internally
844 * synchronized by the QSPI module.
845 */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600846 ndelay(priv->wr_delay);
Pratyush Yadava6903aa2021-06-26 00:47:08 +0530847
Marek Vasut26da6352016-04-27 23:18:55 +0200848 while (remaining > 0) {
849 write_bytes = remaining > page_size ? page_size : remaining;
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600850 writesl(priv->ahbbase, bb_txbuf, write_bytes >> 2);
Vignesh Raaa21d32018-01-24 10:44:07 +0530851 if (write_bytes % 4)
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600852 writesb(priv->ahbbase,
Vignesh Raaa21d32018-01-24 10:44:07 +0530853 bb_txbuf + rounddown(write_bytes, 4),
854 write_bytes % 4);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100855
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600856 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_SDRAMLEVEL,
Álvaro Fernández Rojas48263502018-01-23 17:14:55 +0100857 CQSPI_REG_SDRAMLEVEL_WR_MASK <<
858 CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
Marek Vasut26da6352016-04-27 23:18:55 +0200859 if (ret) {
860 printf("Indirect write timed out (%i)\n", ret);
861 goto failwr;
862 }
Stefan Roese10e8bf82014-11-07 12:37:49 +0100863
Vignesh Raaa21d32018-01-24 10:44:07 +0530864 bb_txbuf += write_bytes;
Marek Vasut26da6352016-04-27 23:18:55 +0200865 remaining -= write_bytes;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100866 }
867
Marek Vasut26da6352016-04-27 23:18:55 +0200868 /* Check indirect done status */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600869 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
Álvaro Fernández Rojas48263502018-01-23 17:14:55 +0100870 CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
Marek Vasut26da6352016-04-27 23:18:55 +0200871 if (ret) {
872 printf("Indirect write completion error (%i)\n", ret);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100873 goto failwr;
874 }
875
876 /* Clear indirect completion status */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000877 writel(CQSPI_REG_INDIRECTWR_DONE,
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600878 priv->regbase + CQSPI_REG_INDIRECTWR);
Marek Vasut846d1d92021-09-14 05:22:31 +0200879
880 /* Check indirect done status */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600881 ret = wait_for_bit_le32(priv->regbase + CQSPI_REG_INDIRECTWR,
Marek Vasut846d1d92021-09-14 05:22:31 +0200882 CQSPI_REG_INDIRECTWR_DONE, 0, 10, 0);
883 if (ret) {
884 printf("Indirect write clear completion error (%i)\n", ret);
885 goto failwr;
886 }
887
Vignesh Raaa21d32018-01-24 10:44:07 +0530888 if (bounce_buf)
889 free(bounce_buf);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100890 return 0;
891
892failwr:
893 /* Cancel the indirect write */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000894 writel(CQSPI_REG_INDIRECTWR_CANCEL,
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600895 priv->regbase + CQSPI_REG_INDIRECTWR);
Vignesh Raaa21d32018-01-24 10:44:07 +0530896 if (bounce_buf)
897 free(bounce_buf);
Marek Vasut26da6352016-04-27 23:18:55 +0200898 return ret;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100899}
900
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600901int cadence_qspi_apb_write_execute(struct cadence_spi_priv *priv,
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530902 const struct spi_mem_op *op)
903{
904 u32 to = op->addr.val;
905 const void *buf = op->data.buf.out;
906 size_t len = op->data.nbytes;
907
T Karthik Reddy248fe9f2022-05-12 04:05:34 -0600908 if (CONFIG_IS_ENABLED(ARCH_VERSAL))
909 cadence_qspi_apb_enable_linear_mode(true);
910
Pratyush Yadav38b08522021-06-26 00:47:09 +0530911 /*
912 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
913 * address (all 0s) with the read status register command in DTR mode.
914 * But this controller does not support sending dummy address bytes to
915 * the flash when it is polling the write completion register in DTR
916 * mode. So, we can not use direct mode when in DTR mode for writing
917 * data.
918 */
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600919 cadence_qspi_apb_enable_linear_mode(true);
920 if (!priv->dtr && priv->use_dac_mode && (to + len < priv->ahbsize)) {
921 memcpy_toio(priv->ahbbase + to, buf, len);
922 if (!cadence_qspi_wait_idle(priv->regbase))
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530923 return -EIO;
924 return 0;
925 }
926
Ashok Reddy Somaf7d4cab2022-08-24 05:38:47 -0600927 return cadence_qspi_apb_indirect_write_execute(priv, len, buf);
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530928}
929
Stefan Roese10e8bf82014-11-07 12:37:49 +0100930void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
931{
932 unsigned int reg;
933
934 /* enter XiP mode immediately and enable direct mode */
935 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000936 reg |= CQSPI_REG_CONFIG_ENABLE;
937 reg |= CQSPI_REG_CONFIG_DIRECT;
938 reg |= CQSPI_REG_CONFIG_XIP_IMM;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100939 writel(reg, reg_base + CQSPI_REG_CONFIG);
940
941 /* keep the XiP mode */
942 writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);
943
944 /* Enable mode bit at devrd */
945 reg = readl(reg_base + CQSPI_REG_RD_INSTR);
946 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
947 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
948}