blob: c00755050e1ccdd149f2247dd4845572ede5f7a2 [file] [log] [blame]
Stefan Roese10e8bf82014-11-07 12:37:49 +01001/*
2 * Copyright (C) 2012 Altera Corporation <www.altera.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * - Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * - Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * - Neither the name of the Altera Corporation nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <common.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060029#include <log.h>
Stefan Roese10e8bf82014-11-07 12:37:49 +010030#include <asm/io.h>
Vignesh Raghavendraffab2122020-01-27 10:36:40 +053031#include <dma.h>
Simon Glasscd93d622020-05-10 11:40:13 -060032#include <linux/bitops.h>
Simon Glassc05ed002020-05-10 11:40:11 -060033#include <linux/delay.h>
Masahiro Yamada1221ce42016-09-21 11:28:55 +090034#include <linux/errno.h>
Marek Vasut26da6352016-04-27 23:18:55 +020035#include <wait_bit.h>
Vignesh R2372e142016-07-06 10:20:56 +053036#include <spi.h>
Vignesh Raghavendrad6407722020-01-27 10:36:39 +053037#include <spi-mem.h>
Vignesh Raaa21d32018-01-24 10:44:07 +053038#include <malloc.h>
Stefan Roese10e8bf82014-11-07 12:37:49 +010039#include "cadence_qspi.h"
40
T Karthik Reddy248fe9f2022-05-12 04:05:34 -060041__weak void cadence_qspi_apb_enable_linear_mode(bool enable)
42{
43 return;
44}
45
Stefan Roese10e8bf82014-11-07 12:37:49 +010046void cadence_qspi_apb_controller_enable(void *reg_base)
47{
48 unsigned int reg;
49 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +000050 reg |= CQSPI_REG_CONFIG_ENABLE;
Stefan Roese10e8bf82014-11-07 12:37:49 +010051 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese10e8bf82014-11-07 12:37:49 +010052}
53
54void cadence_qspi_apb_controller_disable(void *reg_base)
55{
56 unsigned int reg;
57 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +000058 reg &= ~CQSPI_REG_CONFIG_ENABLE;
Stefan Roese10e8bf82014-11-07 12:37:49 +010059 writel(reg, reg_base + CQSPI_REG_CONFIG);
Stefan Roese10e8bf82014-11-07 12:37:49 +010060}
61
Vignesh Raghavendraffab2122020-01-27 10:36:40 +053062void cadence_qspi_apb_dac_mode_enable(void *reg_base)
63{
64 unsigned int reg;
65
66 reg = readl(reg_base + CQSPI_REG_CONFIG);
67 reg |= CQSPI_REG_CONFIG_DIRECT;
68 writel(reg, reg_base + CQSPI_REG_CONFIG);
69}
70
Pratyush Yadav38b08522021-06-26 00:47:09 +053071static unsigned int cadence_qspi_calc_dummy(const struct spi_mem_op *op,
72 bool dtr)
73{
74 unsigned int dummy_clk;
75
Marek Vasutc2e03632021-09-14 05:21:48 +020076 if (!op->dummy.nbytes || !op->dummy.buswidth)
77 return 0;
78
Pratyush Yadav38b08522021-06-26 00:47:09 +053079 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
80 if (dtr)
81 dummy_clk /= 2;
82
83 return dummy_clk;
84}
85
86static u32 cadence_qspi_calc_rdreg(struct cadence_spi_plat *plat)
87{
88 u32 rdreg = 0;
89
90 rdreg |= plat->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
91 rdreg |= plat->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
92 rdreg |= plat->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
93
94 return rdreg;
95}
96
97static int cadence_qspi_buswidth_to_inst_type(u8 buswidth)
98{
99 switch (buswidth) {
100 case 0:
101 case 1:
102 return CQSPI_INST_TYPE_SINGLE;
103
104 case 2:
105 return CQSPI_INST_TYPE_DUAL;
106
107 case 4:
108 return CQSPI_INST_TYPE_QUAD;
109
110 case 8:
111 return CQSPI_INST_TYPE_OCTAL;
112
113 default:
114 return -ENOTSUPP;
115 }
116}
117
118static int cadence_qspi_set_protocol(struct cadence_spi_plat *plat,
119 const struct spi_mem_op *op)
120{
121 int ret;
122
123 plat->dtr = op->data.dtr && op->cmd.dtr && op->addr.dtr;
124
125 ret = cadence_qspi_buswidth_to_inst_type(op->cmd.buswidth);
126 if (ret < 0)
127 return ret;
128 plat->inst_width = ret;
129
130 ret = cadence_qspi_buswidth_to_inst_type(op->addr.buswidth);
131 if (ret < 0)
132 return ret;
133 plat->addr_width = ret;
134
135 ret = cadence_qspi_buswidth_to_inst_type(op->data.buswidth);
136 if (ret < 0)
137 return ret;
138 plat->data_width = ret;
139
140 return 0;
141}
142
Stefan Roese10e8bf82014-11-07 12:37:49 +0100143/* Return 1 if idle, otherwise return 0 (busy). */
144static unsigned int cadence_qspi_wait_idle(void *reg_base)
145{
146 unsigned int start, count = 0;
147 /* timeout in unit of ms */
148 unsigned int timeout = 5000;
149
150 start = get_timer(0);
151 for ( ; get_timer(start) < timeout ; ) {
152 if (CQSPI_REG_IS_IDLE(reg_base))
153 count++;
154 else
155 count = 0;
156 /*
157 * Ensure the QSPI controller is in true idle state after
158 * reading back the same idle status consecutively
159 */
160 if (count >= CQSPI_POLL_IDLE_RETRY)
161 return 1;
162 }
163
164 /* Timeout, still in busy mode. */
165 printf("QSPI: QSPI is still busy after poll for %d times.\n",
166 CQSPI_REG_RETRY);
167 return 0;
168}
169
170void cadence_qspi_apb_readdata_capture(void *reg_base,
171 unsigned int bypass, unsigned int delay)
172{
173 unsigned int reg;
174 cadence_qspi_apb_controller_disable(reg_base);
175
Phil Edworthydb37cc92016-11-29 12:58:29 +0000176 reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100177
178 if (bypass)
Phil Edworthydb37cc92016-11-29 12:58:29 +0000179 reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100180 else
Phil Edworthydb37cc92016-11-29 12:58:29 +0000181 reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100182
Phil Edworthydb37cc92016-11-29 12:58:29 +0000183 reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
184 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100185
Phil Edworthydb37cc92016-11-29 12:58:29 +0000186 reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
187 << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100188
Phil Edworthydb37cc92016-11-29 12:58:29 +0000189 writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100190
191 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100192}
193
194void cadence_qspi_apb_config_baudrate_div(void *reg_base,
195 unsigned int ref_clk_hz, unsigned int sclk_hz)
196{
197 unsigned int reg;
198 unsigned int div;
199
200 cadence_qspi_apb_controller_disable(reg_base);
201 reg = readl(reg_base + CQSPI_REG_CONFIG);
202 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
203
Phil Edworthy32068c42016-11-29 12:58:27 +0000204 /*
205 * The baud_div field in the config reg is 4 bits, and the ref clock is
206 * divided by 2 * (baud_div + 1). Round up the divider to ensure the
207 * SPI clock rate is less than or equal to the requested clock rate.
208 */
209 div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100210
Chin Liang See54058172016-08-07 22:50:40 +0800211 /* ensure the baud rate doesn't exceed the max value */
212 if (div > CQSPI_REG_CONFIG_BAUD_MASK)
213 div = CQSPI_REG_CONFIG_BAUD_MASK;
214
Phil Edworthy0ceb4d92016-11-29 12:58:28 +0000215 debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
216 ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));
217
Chin Liang See54058172016-08-07 22:50:40 +0800218 reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100219 writel(reg, reg_base + CQSPI_REG_CONFIG);
220
221 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100222}
223
Phil Edworthy7d403f22016-11-29 12:58:31 +0000224void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100225{
226 unsigned int reg;
227
228 cadence_qspi_apb_controller_disable(reg_base);
229 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthydb37cc92016-11-29 12:58:29 +0000230 reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100231
Phil Edworthy7d403f22016-11-29 12:58:31 +0000232 if (mode & SPI_CPOL)
Phil Edworthydb37cc92016-11-29 12:58:29 +0000233 reg |= CQSPI_REG_CONFIG_CLK_POL;
Phil Edworthy7d403f22016-11-29 12:58:31 +0000234 if (mode & SPI_CPHA)
Phil Edworthydb37cc92016-11-29 12:58:29 +0000235 reg |= CQSPI_REG_CONFIG_CLK_PHA;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100236
237 writel(reg, reg_base + CQSPI_REG_CONFIG);
238
239 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100240}
241
242void cadence_qspi_apb_chipselect(void *reg_base,
243 unsigned int chip_select, unsigned int decoder_enable)
244{
245 unsigned int reg;
246
247 cadence_qspi_apb_controller_disable(reg_base);
248
249 debug("%s : chipselect %d decode %d\n", __func__, chip_select,
250 decoder_enable);
251
252 reg = readl(reg_base + CQSPI_REG_CONFIG);
253 /* docoder */
254 if (decoder_enable) {
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000255 reg |= CQSPI_REG_CONFIG_DECODE;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100256 } else {
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000257 reg &= ~CQSPI_REG_CONFIG_DECODE;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100258 /* Convert CS if without decoder.
259 * CS0 to 4b'1110
260 * CS1 to 4b'1101
261 * CS2 to 4b'1011
262 * CS3 to 4b'0111
263 */
264 chip_select = 0xF & ~(1 << chip_select);
265 }
266
267 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
268 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
269 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
270 << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
271 writel(reg, reg_base + CQSPI_REG_CONFIG);
272
273 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100274}
275
276void cadence_qspi_apb_delay(void *reg_base,
277 unsigned int ref_clk, unsigned int sclk_hz,
278 unsigned int tshsl_ns, unsigned int tsd2d_ns,
279 unsigned int tchsh_ns, unsigned int tslch_ns)
280{
281 unsigned int ref_clk_ns;
282 unsigned int sclk_ns;
283 unsigned int tshsl, tchsh, tslch, tsd2d;
284 unsigned int reg;
285
286 cadence_qspi_apb_controller_disable(reg_base);
287
288 /* Convert to ns. */
Phil Edworthy22e63ff2016-11-29 12:58:33 +0000289 ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100290
291 /* Convert to ns. */
Phil Edworthy22e63ff2016-11-29 12:58:33 +0000292 sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100293
Phil Edworthy22e63ff2016-11-29 12:58:33 +0000294 /* The controller adds additional delay to that programmed in the reg */
295 if (tshsl_ns >= sclk_ns + ref_clk_ns)
296 tshsl_ns -= sclk_ns + ref_clk_ns;
297 if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
298 tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
299 tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
300 tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
301 tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
302 tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100303
304 reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
305 << CQSPI_REG_DELAY_TSHSL_LSB);
306 reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
307 << CQSPI_REG_DELAY_TCHSH_LSB);
308 reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
309 << CQSPI_REG_DELAY_TSLCH_LSB);
310 reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
311 << CQSPI_REG_DELAY_TSD2D_LSB);
312 writel(reg, reg_base + CQSPI_REG_DELAY);
313
314 cadence_qspi_apb_controller_enable(reg_base);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100315}
316
Simon Glass8a8d24b2020-12-03 16:55:23 -0700317void cadence_qspi_apb_controller_init(struct cadence_spi_plat *plat)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100318{
319 unsigned reg;
320
321 cadence_qspi_apb_controller_disable(plat->regbase);
322
323 /* Configure the device size and address bytes */
324 reg = readl(plat->regbase + CQSPI_REG_SIZE);
325 /* Clear the previous value */
326 reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
327 reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
328 reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
329 reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
330 writel(reg, plat->regbase + CQSPI_REG_SIZE);
331
332 /* Configure the remap address register, no remap */
333 writel(0, plat->regbase + CQSPI_REG_REMAP);
334
Vikas Manochac0535c02015-07-02 18:29:43 -0700335 /* Indirect mode configurations */
Jason Rush15a70a52018-01-23 17:13:09 -0600336 writel(plat->fifo_depth / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);
Vikas Manochac0535c02015-07-02 18:29:43 -0700337
Stefan Roese10e8bf82014-11-07 12:37:49 +0100338 /* Disable all interrupts */
339 writel(0, plat->regbase + CQSPI_REG_IRQMASK);
340
341 cadence_qspi_apb_controller_enable(plat->regbase);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100342}
343
T Karthik Reddycf553bf2022-05-12 04:05:32 -0600344int cadence_qspi_apb_exec_flash_cmd(void *reg_base, unsigned int reg)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100345{
346 unsigned int retry = CQSPI_REG_RETRY;
347
348 /* Write the CMDCTRL without start execution. */
349 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
350 /* Start execute */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000351 reg |= CQSPI_REG_CMDCTRL_EXECUTE;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100352 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
353
354 while (retry--) {
355 reg = readl(reg_base + CQSPI_REG_CMDCTRL);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000356 if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100357 break;
358 udelay(1);
359 }
360
361 if (!retry) {
362 printf("QSPI: flash command execution timeout\n");
363 return -EIO;
364 }
365
366 /* Polling QSPI idle status. */
367 if (!cadence_qspi_wait_idle(reg_base))
368 return -EIO;
369
370 return 0;
371}
372
Pratyush Yadav38b08522021-06-26 00:47:09 +0530373static int cadence_qspi_setup_opcode_ext(struct cadence_spi_plat *plat,
374 const struct spi_mem_op *op,
375 unsigned int shift)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100376{
377 unsigned int reg;
Pratyush Yadav38b08522021-06-26 00:47:09 +0530378 u8 ext;
379
380 if (op->cmd.nbytes != 2)
381 return -EINVAL;
382
383 /* Opcode extension is the LSB. */
384 ext = op->cmd.opcode & 0xff;
385
386 reg = readl(plat->regbase + CQSPI_REG_OP_EXT_LOWER);
387 reg &= ~(0xff << shift);
388 reg |= ext << shift;
389 writel(reg, plat->regbase + CQSPI_REG_OP_EXT_LOWER);
390
391 return 0;
392}
393
394static int cadence_qspi_enable_dtr(struct cadence_spi_plat *plat,
395 const struct spi_mem_op *op,
396 unsigned int shift,
397 bool enable)
398{
399 unsigned int reg;
400 int ret;
401
402 reg = readl(plat->regbase + CQSPI_REG_CONFIG);
403
404 if (enable) {
405 reg |= CQSPI_REG_CONFIG_DTR_PROTO;
406 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;
407
408 /* Set up command opcode extension. */
409 ret = cadence_qspi_setup_opcode_ext(plat, op, shift);
410 if (ret)
411 return ret;
412 } else {
413 reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
414 reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
415 }
416
417 writel(reg, plat->regbase + CQSPI_REG_CONFIG);
418
419 return 0;
420}
421
422int cadence_qspi_apb_command_read_setup(struct cadence_spi_plat *plat,
423 const struct spi_mem_op *op)
424{
425 int ret;
426 unsigned int reg;
427
428 ret = cadence_qspi_set_protocol(plat, op);
429 if (ret)
430 return ret;
431
432 ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
433 plat->dtr);
434 if (ret)
435 return ret;
436
437 reg = cadence_qspi_calc_rdreg(plat);
438 writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);
439
440 return 0;
441}
442
/* For command RDID, RDSR. */
int cadence_qspi_apb_command_read(struct cadence_spi_plat *plat,
				  const struct spi_mem_op *op)
{
	void *reg_base = plat->regbase;
	unsigned int reg;
	unsigned int read_len;
	int status;
	unsigned int rxlen = op->data.nbytes;
	void *rxbuf = op->data.buf.in;
	unsigned int dummy_clk;
	u8 opcode;

	/* STIG reads are limited to the 8 bytes of the two data registers. */
	if (rxlen > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		printf("QSPI: Invalid input arguments rxlen %u\n", rxlen);
		return -EINVAL;
	}

	/* In DTR mode the opcode is 2 bytes; the command byte is the MSB. */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	/* Set up dummy cycles. */
	dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -ENOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	/* Mark this as a read command. */
	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
	if (status != 0)
		return status;

	/* Fetch up to the first 4 bytes from the lower data register. */
	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (rxlen > 4) ? 4 : rxlen;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	/* Bytes 5..8, if any, come from the upper data register. */
	if (rxlen > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = rxlen - read_len;
		memcpy(rxbuf, &reg, read_len);
	}
	return 0;
}
501
Pratyush Yadav38b08522021-06-26 00:47:09 +0530502int cadence_qspi_apb_command_write_setup(struct cadence_spi_plat *plat,
503 const struct spi_mem_op *op)
504{
505 int ret;
506 unsigned int reg;
507
508 ret = cadence_qspi_set_protocol(plat, op);
509 if (ret)
510 return ret;
511
512 ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_STIG_LSB,
513 plat->dtr);
514 if (ret)
515 return ret;
516
517 reg = cadence_qspi_calc_rdreg(plat);
518 writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);
519
520 return 0;
521}
522
/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
int cadence_qspi_apb_command_write(struct cadence_spi_plat *plat,
				   const struct spi_mem_op *op)
{
	unsigned int reg = 0;
	unsigned int wr_data;
	unsigned int wr_len;
	unsigned int txlen = op->data.nbytes;
	const void *txbuf = op->data.buf.out;
	void *reg_base = plat->regbase;
	u32 addr;
	u8 opcode;

	/* Reorder address to SPI bus order if only transferring address */
	if (!txlen) {
		addr = cpu_to_be32(op->addr.val);
		/* A 3-byte address occupies the low 3 bytes after the shift. */
		if (op->addr.nbytes == 3)
			addr >>= 8;
		txbuf = &addr;
		txlen = op->addr.nbytes;
	}

	/* STIG writes are limited to the 8 bytes of the two data registers. */
	if (txlen > CQSPI_STIG_DATA_LEN_MAX) {
		printf("QSPI: Invalid input arguments txlen %u\n", txlen);
		return -EINVAL;
	}

	/* In DTR mode the opcode is 2 bytes; the command byte is the MSB. */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg |= opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (txlen) {
		/* writing data = yes */
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		/* Byte-count field is zero-based: 0 means 1 byte. */
		reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;

		/* First up to 4 bytes go into the lower data register. */
		wr_len = txlen > 4 ? 4 : txlen;
		memcpy(&wr_data, txbuf, wr_len);
		writel(wr_data, reg_base +
			CQSPI_REG_CMDWRITEDATALOWER);

		/* Remaining bytes (5..8) go into the upper data register. */
		if (txlen > 4) {
			txbuf += wr_len;
			wr_len = txlen - wr_len;
			memcpy(&wr_data, txbuf, wr_len);
			writel(wr_data, reg_base +
				CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	/* Execute the command */
	return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
}
580
/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
int cadence_qspi_apb_read_setup(struct cadence_spi_plat *plat,
				const struct spi_mem_op *op)
{
	unsigned int reg;
	unsigned int rd_reg;
	unsigned int dummy_clk;
	unsigned int dummy_bytes = op->dummy.nbytes;
	int ret;
	u8 opcode;

	/* Cache lane widths and DTR mode from the op. */
	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	/* Program DTR mode and the read opcode extension. */
	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_READ_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	/* In DTR mode the opcode is 2 bytes; the command byte is the MSB. */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	rd_reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	rd_reg |= cadence_qspi_calc_rdreg(plat);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);

	if (dummy_bytes) {
		/* Convert to clock cycles. */
		dummy_clk = cadence_qspi_calc_dummy(op, plat->dtr);

		if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
			return -ENOTSUPP;

		if (dummy_clk)
			rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
				<< CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);

	/* set device size */
	/* Address-bytes field is zero-based: program nbytes - 1. */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}
637
Simon Glass8a8d24b2020-12-03 16:55:23 -0700638static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_plat *plat)
Stefan Roese10e8bf82014-11-07 12:37:49 +0100639{
Marek Vasut5a824c42016-04-27 23:38:05 +0200640 u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
641 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
642 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
643}
Stefan Roese10e8bf82014-11-07 12:37:49 +0100644
Simon Glass8a8d24b2020-12-03 16:55:23 -0700645static int cadence_qspi_wait_for_data(struct cadence_spi_plat *plat)
Marek Vasut5a824c42016-04-27 23:38:05 +0200646{
647 unsigned int timeout = 10000;
648 u32 reg;
649
650 while (timeout--) {
651 reg = cadence_qspi_get_rd_sram_level(plat);
652 if (reg)
653 return reg;
654 udelay(1);
655 }
656
657 return -ETIMEDOUT;
658}
659
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530660static int
Simon Glass8a8d24b2020-12-03 16:55:23 -0700661cadence_qspi_apb_indirect_read_execute(struct cadence_spi_plat *plat,
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530662 unsigned int n_rx, u8 *rxbuf)
Marek Vasut5a824c42016-04-27 23:38:05 +0200663{
664 unsigned int remaining = n_rx;
665 unsigned int bytes_to_read = 0;
666 int ret;
667
668 writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100669
670 /* Start the indirect read transfer */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000671 writel(CQSPI_REG_INDIRECTRD_START,
Stefan Roese10e8bf82014-11-07 12:37:49 +0100672 plat->regbase + CQSPI_REG_INDIRECTRD);
673
Marek Vasut5a824c42016-04-27 23:38:05 +0200674 while (remaining > 0) {
675 ret = cadence_qspi_wait_for_data(plat);
676 if (ret < 0) {
677 printf("Indirect write timed out (%i)\n", ret);
678 goto failrd;
679 }
Stefan Roese10e8bf82014-11-07 12:37:49 +0100680
Marek Vasut5a824c42016-04-27 23:38:05 +0200681 bytes_to_read = ret;
682
683 while (bytes_to_read != 0) {
Jason Rush15a70a52018-01-23 17:13:09 -0600684 bytes_to_read *= plat->fifo_width;
Marek Vasut5a824c42016-04-27 23:38:05 +0200685 bytes_to_read = bytes_to_read > remaining ?
686 remaining : bytes_to_read;
Goldschmidt Simon948ad4f2018-01-24 10:44:05 +0530687 /*
688 * Handle non-4-byte aligned access to avoid
689 * data abort.
690 */
691 if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
692 readsb(plat->ahbbase, rxbuf, bytes_to_read);
693 else
694 readsl(plat->ahbbase, rxbuf,
695 bytes_to_read >> 2);
696 rxbuf += bytes_to_read;
Marek Vasut5a824c42016-04-27 23:38:05 +0200697 remaining -= bytes_to_read;
698 bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
699 }
700 }
701
702 /* Check indirect done status */
Álvaro Fernández Rojas48263502018-01-23 17:14:55 +0100703 ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
704 CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
Marek Vasut5a824c42016-04-27 23:38:05 +0200705 if (ret) {
706 printf("Indirect read completion error (%i)\n", ret);
Stefan Roese10e8bf82014-11-07 12:37:49 +0100707 goto failrd;
708 }
709
710 /* Clear indirect completion status */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000711 writel(CQSPI_REG_INDIRECTRD_DONE,
Stefan Roese10e8bf82014-11-07 12:37:49 +0100712 plat->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut5a824c42016-04-27 23:38:05 +0200713
Marek Vasut846d1d92021-09-14 05:22:31 +0200714 /* Check indirect done status */
715 ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
716 CQSPI_REG_INDIRECTRD_DONE, 0, 10, 0);
717 if (ret) {
718 printf("Indirect read clear completion error (%i)\n", ret);
719 goto failrd;
720 }
721
Stefan Roese10e8bf82014-11-07 12:37:49 +0100722 return 0;
723
724failrd:
725 /* Cancel the indirect read */
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000726 writel(CQSPI_REG_INDIRECTRD_CANCEL,
Stefan Roese10e8bf82014-11-07 12:37:49 +0100727 plat->regbase + CQSPI_REG_INDIRECTRD);
Marek Vasut5a824c42016-04-27 23:38:05 +0200728 return ret;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100729}
730
int cadence_qspi_apb_read_execute(struct cadence_spi_plat *plat,
				  const struct spi_mem_op *op)
{
	u64 from = op->addr.val;
	void *buf = op->data.buf.in;
	size_t len = op->data.nbytes;

	/* Versal routes the AHB window through a linear-mode mux. */
	if (CONFIG_IS_ENABLED(ARCH_VERSAL))
		cadence_qspi_apb_enable_linear_mode(true);

	/*
	 * Prefer direct access (DAC) when enabled and the whole range fits
	 * inside the AHB window; fall back to indirect reads otherwise.
	 */
	if (plat->use_dac_mode && (from + len < plat->ahbsize)) {
		/* DMA for large transfers; plain copy if short or DMA fails. */
		if (len < 256 ||
		    dma_memcpy(buf, plat->ahbbase + from, len) < 0) {
			memcpy_fromio(buf, plat->ahbbase + from, len);
		}
		if (!cadence_qspi_wait_idle(plat->regbase))
			return -EIO;
		return 0;
	}

	return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
}
753
/* Opcode + Address (3/4 bytes) */
int cadence_qspi_apb_write_setup(struct cadence_spi_plat *plat,
				 const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	u8 opcode;

	/* Cache lane widths and DTR mode from the op. */
	ret = cadence_qspi_set_protocol(plat, op);
	if (ret)
		return ret;

	/* Program DTR mode and the write opcode extension. */
	ret = cadence_qspi_enable_dtr(plat, op, CQSPI_REG_OP_EXT_WRITE_LSB,
				      plat->dtr);
	if (ret)
		return ret;

	/* Setup the indirect trigger address */
	writel(plat->trigger_address,
	       plat->regbase + CQSPI_REG_INDIRECTTRIGGER);

	/* Configure the opcode */
	/* In DTR mode the opcode is 2 bytes; the command byte is the MSB. */
	if (plat->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= plat->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= plat->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);

	/* Keep RD_INSTR lane configuration consistent with the protocol. */
	reg = cadence_qspi_calc_rdreg(plat);
	writel(reg, plat->regbase + CQSPI_REG_RD_INSTR);

	writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);

	if (plat->dtr) {
		/*
		 * Some flashes like the cypress Semper flash expect a 4-byte
		 * dummy address with the Read SR command in DTR mode, but this
		 * controller does not support sending address with the Read SR
		 * command. So, disable write completion polling on the
		 * controller's side. spi-nor will take care of polling the
		 * status register.
		 */
		reg = readl(plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, plat->regbase + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	/* Address-bytes field is zero-based: program nbytes - 1. */
	reg = readl(plat->regbase + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, plat->regbase + CQSPI_REG_SIZE);
	return 0;
}
811
/*
 * cadence_qspi_apb_indirect_write_execute() - write data via the indirect
 * (SRAM FIFO) write engine of the Cadence QSPI controller.
 * @plat:  controller platform data (register base, AHB base, page size)
 * @n_tx:  total number of bytes to transfer
 * @txbuf: source data; may be arbitrarily aligned
 *
 * Data is pushed to the controller's AHB data port one flash page at a
 * time, then the function waits for the write SRAM to drain before
 * queueing the next page.
 *
 * Return: 0 on success, -ENOMEM if the bounce buffer cannot be allocated,
 * or a negative error code from wait_for_bit_le32() on timeout.
 */
static int
cadence_qspi_apb_indirect_write_execute(struct cadence_spi_plat *plat,
					unsigned int n_tx, const u8 *txbuf)
{
	unsigned int page_size = plat->page_size;
	unsigned int remaining = n_tx;
	const u8 *bb_txbuf = txbuf;
	void *bounce_buf = NULL;
	unsigned int write_bytes;
	int ret;

	/*
	 * Use bounce buffer for non 32 bit aligned txbuf to avoid data
	 * aborts: the bulk copy below uses 32-bit accesses (writesl).
	 */
	if ((uintptr_t)txbuf % 4) {
		bounce_buf = malloc(n_tx);
		if (!bounce_buf)
			return -ENOMEM;
		memcpy(bounce_buf, txbuf, n_tx);
		bb_txbuf = bounce_buf;
	}

	/* Configure the indirect write transfer bytes */
	writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);

	/* Start the indirect write transfer */
	writel(CQSPI_REG_INDIRECTWR_START,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	/*
	 * Some delay is required for the above bit to be internally
	 * synchronized by the QSPI module.
	 */
	ndelay(plat->wr_delay);

	/* Feed the data port one page (or less, for the tail) at a time */
	while (remaining > 0) {
		write_bytes = remaining > page_size ? page_size : remaining;
		/* Word copy first, then the up-to-3 trailing bytes */
		writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
		if (write_bytes % 4)
			writesb(plat->ahbbase,
				bb_txbuf + rounddown(write_bytes, 4),
				write_bytes % 4);

		/* Wait for the write SRAM fill level to drop back to 0 */
		ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_SDRAMLEVEL,
					CQSPI_REG_SDRAMLEVEL_WR_MASK <<
					CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
		if (ret) {
			printf("Indirect write timed out (%i)\n", ret);
			goto failwr;
		}

		bb_txbuf += write_bytes;
		remaining -= write_bytes;
	}

	/* Check indirect done status */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
	if (ret) {
		printf("Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE,
	       plat->regbase + CQSPI_REG_INDIRECTWR);

	/* Wait until the DONE bit actually reads back as cleared */
	ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
				CQSPI_REG_INDIRECTWR_DONE, 0, 10, 0);
	if (ret) {
		printf("Indirect write clear completion error (%i)\n", ret);
		goto failwr;
	}

	if (bounce_buf)
		free(bounce_buf);
	return 0;

failwr:
	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL,
	       plat->regbase + CQSPI_REG_INDIRECTWR);
	if (bounce_buf)
		free(bounce_buf);
	return ret;
}
900
Simon Glass8a8d24b2020-12-03 16:55:23 -0700901int cadence_qspi_apb_write_execute(struct cadence_spi_plat *plat,
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530902 const struct spi_mem_op *op)
903{
904 u32 to = op->addr.val;
905 const void *buf = op->data.buf.out;
906 size_t len = op->data.nbytes;
907
T Karthik Reddy248fe9f2022-05-12 04:05:34 -0600908 if (CONFIG_IS_ENABLED(ARCH_VERSAL))
909 cadence_qspi_apb_enable_linear_mode(true);
910
Pratyush Yadav38b08522021-06-26 00:47:09 +0530911 /*
912 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
913 * address (all 0s) with the read status register command in DTR mode.
914 * But this controller does not support sending dummy address bytes to
915 * the flash when it is polling the write completion register in DTR
916 * mode. So, we can not use direct mode when in DTR mode for writing
917 * data.
918 */
919 if (!plat->dtr && plat->use_dac_mode && (to + len < plat->ahbsize)) {
Vignesh Raghavendraffab2122020-01-27 10:36:40 +0530920 memcpy_toio(plat->ahbbase + to, buf, len);
921 if (!cadence_qspi_wait_idle(plat->regbase))
922 return -EIO;
923 return 0;
924 }
925
926 return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
927}
928
Stefan Roese10e8bf82014-11-07 12:37:49 +0100929void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
930{
931 unsigned int reg;
932
933 /* enter XiP mode immediately and enable direct mode */
934 reg = readl(reg_base + CQSPI_REG_CONFIG);
Phil Edworthy7e76c4b2016-11-29 12:58:30 +0000935 reg |= CQSPI_REG_CONFIG_ENABLE;
936 reg |= CQSPI_REG_CONFIG_DIRECT;
937 reg |= CQSPI_REG_CONFIG_XIP_IMM;
Stefan Roese10e8bf82014-11-07 12:37:49 +0100938 writel(reg, reg_base + CQSPI_REG_CONFIG);
939
940 /* keep the XiP mode */
941 writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);
942
943 /* Enable mode bit at devrd */
944 reg = readl(reg_base + CQSPI_REG_RD_INSTR);
945 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
946 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
947}