// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <spi.h>
#include <spi-mem.h>
#include <watchdog.h>
#include <asm/io.h>
#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/compat.h>
#include <linux/delay.h>

#define OCTEON_SPI_MAX_BYTES		9
#define OCTEON_SPI_MAX_CLOCK_HZ		50000000

#define OCTEON_SPI_NUM_CS		4

#define OCTEON_SPI_CS_VALID(cs)		((cs) < OCTEON_SPI_NUM_CS)

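/* Register offsets relative to the MPI/SPI controller base address */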
#define MPI_CFG			0x0000
#define MPI_STS			0x0008
#define MPI_TX			0x0010
#define MPI_XMIT		0x0018
#define MPI_WIDE_DAT		0x0040
#define MPI_IO_CTL		0x0048
#define MPI_DAT(X)		(0x0080 + ((X) << 3))
#define MPI_WIDE_BUF(X)		(0x0800 + ((X) << 3))
#define MPI_CYA_CFG		0x1000
#define MPI_CLKEN		0x1080

#define MPI_CFG_ENABLE		BIT_ULL(0)
#define MPI_CFG_IDLELO		BIT_ULL(1)
#define MPI_CFG_CLK_CONT	BIT_ULL(2)
#define MPI_CFG_WIREOR		BIT_ULL(3)
#define MPI_CFG_LSBFIRST	BIT_ULL(4)
#define MPI_CFG_CS_STICKY	BIT_ULL(5)
#define MPI_CFG_CSHI		BIT_ULL(7)
#define MPI_CFG_IDLECLKS	GENMASK_ULL(9, 8)
#define MPI_CFG_TRITX		BIT_ULL(10)
#define MPI_CFG_CSLATE		BIT_ULL(11)
#define MPI_CFG_CSENA0		BIT_ULL(12)
#define MPI_CFG_CSENA1		BIT_ULL(13)
#define MPI_CFG_CSENA2		BIT_ULL(14)
#define MPI_CFG_CSENA3		BIT_ULL(15)
#define MPI_CFG_CLKDIV		GENMASK_ULL(28, 16)
#define MPI_CFG_LEGACY_DIS	BIT_ULL(31)
#define MPI_CFG_IOMODE		GENMASK_ULL(35, 34)
#define MPI_CFG_TB100_EN	BIT_ULL(49)

#define MPI_DAT_DATA		GENMASK_ULL(7, 0)

#define MPI_STS_BUSY		BIT_ULL(0)
#define MPI_STS_MPI_INTR	BIT_ULL(1)
#define MPI_STS_RXNUM		GENMASK_ULL(12, 8)

#define MPI_TX_TOTNUM		GENMASK_ULL(4, 0)
#define MPI_TX_TXNUM		GENMASK_ULL(12, 8)
#define MPI_TX_LEAVECS		BIT_ULL(16)
#define MPI_TX_CSID		GENMASK_ULL(21, 20)

#define MPI_XMIT_TOTNUM		GENMASK_ULL(10, 0)
#define MPI_XMIT_TXNUM		GENMASK_ULL(30, 20)
#define MPI_XMIT_BUF_SEL	BIT_ULL(59)
#define MPI_XMIT_LEAVECS	BIT_ULL(60)
#define MPI_XMIT_CSID		GENMASK_ULL(62, 61)

/* Used on Octeon TX2 */
void board_acquire_flash_arb(bool acquire);

/* Local driver data structure */
struct octeon_spi {
	void __iomem *base;	/* Register base address */
	struct clk clk;
	u32 clkdiv;		/* Clock divisor for device speed */
};

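/**
 * Build the MPI_CFG register value for a slave device
 *
 * The clock divisor, CPHA/CPOL handling, chip-select polarity and bit
 * order are derived from the slave's mode and the bus clock divisor.
 *
 * @param dev	SPI slave device
 *
 * @return value to be written to MPI_CFG
 */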
static u64 octeon_spi_set_mpicfg(struct udevice *dev)
{
	struct dm_spi_slave_platdata *slave = dev_get_parent_platdata(dev);
	struct udevice *bus = dev_get_parent(dev);
	struct octeon_spi *priv = dev_get_priv(bus);
	u64 mpi_cfg;
	uint max_speed = slave->max_hz;
	bool cpha, cpol;

	if (!max_speed)
		max_speed = 12500000;
	if (max_speed > OCTEON_SPI_MAX_CLOCK_HZ)
		max_speed = OCTEON_SPI_MAX_CLOCK_HZ;

	debug("\n slave params %d %d %d\n", slave->cs,
	      slave->max_hz, slave->mode);
	cpha = !!(slave->mode & SPI_CPHA);
	cpol = !!(slave->mode & SPI_CPOL);

	mpi_cfg = FIELD_PREP(MPI_CFG_CLKDIV, priv->clkdiv & 0x1fff) |
		  FIELD_PREP(MPI_CFG_CSHI, !!(slave->mode & SPI_CS_HIGH)) |
		  FIELD_PREP(MPI_CFG_LSBFIRST, !!(slave->mode & SPI_LSB_FIRST)) |
		  FIELD_PREP(MPI_CFG_WIREOR, !!(slave->mode & SPI_3WIRE)) |
		  FIELD_PREP(MPI_CFG_IDLELO, cpha != cpol) |
		  FIELD_PREP(MPI_CFG_CSLATE, cpha) |
		  MPI_CFG_CSENA0 | MPI_CFG_CSENA1 |
		  MPI_CFG_CSENA2 | MPI_CFG_CSENA3 |
		  MPI_CFG_ENABLE;

	debug("\n mpi_cfg %llx\n", mpi_cfg);
	return mpi_cfg;
}

/**
 * Wait until the SPI bus is ready
 *
 * @param dev	SPI device to wait for
 */
static void octeon_spi_wait_ready(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct octeon_spi *priv = dev_get_priv(bus);
	void *base = priv->base;
	u64 mpi_sts;

	do {
		mpi_sts = readq(base + MPI_STS);
		WATCHDOG_RESET();
	} while (mpi_sts & MPI_STS_BUSY);

	debug("%s(%s)\n", __func__, dev->name);
}

/**
 * Claim the bus for a slave device
 *
 * @param dev	SPI bus
 *
 * @return 0 for success, -EINVAL if chip select is invalid
 */
static int octeon_spi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct octeon_spi *priv = dev_get_priv(bus);
	void *base = priv->base;
	u64 mpi_cfg;

	debug("\n\n%s(%s)\n", __func__, dev->name);
	if (!OCTEON_SPI_CS_VALID(spi_chip_select(dev)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2))
		board_acquire_flash_arb(true);

	mpi_cfg = readq(base + MPI_CFG);
	mpi_cfg &= ~MPI_CFG_TRITX;
	mpi_cfg |= MPI_CFG_ENABLE;
	writeq(mpi_cfg, base + MPI_CFG);
	mpi_cfg = readq(base + MPI_CFG);
	udelay(5);	/** Wait for bus to settle */

	return 0;
}

/**
 * Release the bus to a slave device
 *
 * @param dev	SPI bus
 *
 * @return 0 for success, -EINVAL if chip select is invalid
 */
static int octeon_spi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct octeon_spi *priv = dev_get_priv(bus);
	void *base = priv->base;
	u64 mpi_cfg;

	debug("%s(%s)\n\n", __func__, dev->name);
	if (!OCTEON_SPI_CS_VALID(spi_chip_select(dev)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2))
		board_acquire_flash_arb(false);

	mpi_cfg = readq(base + MPI_CFG);
	mpi_cfg &= ~MPI_CFG_ENABLE;
	writeq(mpi_cfg, base + MPI_CFG);
	mpi_cfg = readq(base + MPI_CFG);
	udelay(1);

	return 0;
}

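/**
 * Perform a SPI transfer on the legacy (Octeon/Octeon TX) controller
 *
 * Data is shifted in 8-byte chunks through the MPI_WIDE_DAT register;
 * any remaining bytes go through the byte-wide MPI_DAT(n) registers.
 *
 * @param dev	 SPI slave device
 * @param bitlen number of bits to transfer (must be a multiple of 8)
 * @param dout	 TX buffer, or NULL for a read-only transfer
 * @param din	 RX buffer, or NULL for a write-only transfer
 * @param flags	 SPI_XFER_* flags
 *
 * @return 0 for success, -EINVAL if chip select is invalid
 */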
static int octeon_spi_xfer(struct udevice *dev, unsigned int bitlen,
			   const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev_get_parent(dev);
	struct octeon_spi *priv = dev_get_priv(bus);
	void *base = priv->base;
	u64 mpi_tx;
	u64 mpi_cfg;
	u64 wide_dat = 0;
	int len = bitlen / 8;
	int i;
	const u8 *tx_data = dout;
	u8 *rx_data = din;
	int cs = spi_chip_select(dev);

	if (!OCTEON_SPI_CS_VALID(cs))
		return -EINVAL;

	debug("\n %s(%s, %u, %p, %p, 0x%lx), cs: %d\n",
	      __func__, dev->name, bitlen, dout, din, flags, cs);

	mpi_cfg = octeon_spi_set_mpicfg(dev);
	if (mpi_cfg != readq(base + MPI_CFG)) {
		writeq(mpi_cfg, base + MPI_CFG);
		mpi_cfg = readq(base + MPI_CFG);
		udelay(10);
	}

	debug("\n mpi_cfg upd %llx\n", mpi_cfg);

	/*
	 * Start by writing and reading 8 bytes at a time. While we can support
	 * up to 10, it's easier to just use 8 with the MPI_WIDE_DAT register.
	 */
	while (len > 8) {
		if (tx_data) {
			wide_dat = get_unaligned((u64 *)tx_data);
			debug(" tx: %016llx \t", (unsigned long long)wide_dat);
			tx_data += 8;
			writeq(wide_dat, base + MPI_WIDE_DAT);
		}

		mpi_tx = FIELD_PREP(MPI_TX_CSID, cs) |
			 FIELD_PREP(MPI_TX_LEAVECS, 1) |
			 FIELD_PREP(MPI_TX_TXNUM, tx_data ? 8 : 0) |
			 FIELD_PREP(MPI_TX_TOTNUM, 8);
		writeq(mpi_tx, base + MPI_TX);

		octeon_spi_wait_ready(dev);

		debug("\n ");

		if (rx_data) {
			wide_dat = readq(base + MPI_WIDE_DAT);
			debug(" rx: %016llx\t", (unsigned long long)wide_dat);
			*(u64 *)rx_data = wide_dat;
			rx_data += 8;
		}
		len -= 8;
	}

	debug("\n ");

	/* Write and read the rest of the data */
	if (tx_data) {
		for (i = 0; i < len; i++) {
			debug(" tx: %02x\n", *tx_data);
			writeq(*tx_data++, base + MPI_DAT(i));
		}
	}

	mpi_tx = FIELD_PREP(MPI_TX_CSID, cs) |
		 FIELD_PREP(MPI_TX_LEAVECS, !(flags & SPI_XFER_END)) |
		 FIELD_PREP(MPI_TX_TXNUM, tx_data ? len : 0) |
		 FIELD_PREP(MPI_TX_TOTNUM, len);
	writeq(mpi_tx, base + MPI_TX);

	octeon_spi_wait_ready(dev);

	debug("\n ");

	if (rx_data) {
		for (i = 0; i < len; i++) {
			*rx_data = readq(base + MPI_DAT(i)) & 0xff;
			debug(" rx: %02x\n", *rx_data);
			rx_data++;
		}
	}

	return 0;
}

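/**
 * Perform a SPI transfer on the Octeon TX2 controller
 *
 * Up to 1024 bytes per burst are moved through the MPI_WIDE_BUF buffer
 * and kicked off via the MPI_XMIT register; dual/quad I/O is selected
 * from the transfer flags.
 *
 * @param dev	 SPI slave device
 * @param bitlen number of bits to transfer (must be a multiple of 8)
 * @param dout	 TX buffer, or NULL for a read-only transfer
 * @param din	 RX buffer, or NULL for a write-only transfer
 * @param flags	 SPI_XFER_* flags
 *
 * @return 0 for success, -EINVAL if chip select is invalid
 */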
static int octeontx2_spi_xfer(struct udevice *dev, unsigned int bitlen,
			      const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev_get_parent(dev);
	struct octeon_spi *priv = dev_get_priv(bus);
	void *base = priv->base;
	u64 mpi_xmit;
	u64 mpi_cfg;
	u64 wide_dat = 0;
	int len = bitlen / 8;
	int rem;
	int i;
	const u8 *tx_data = dout;
	u8 *rx_data = din;
	int cs = spi_chip_select(dev);

	if (!OCTEON_SPI_CS_VALID(cs))
		return -EINVAL;

	debug("\n %s(%s, %u, %p, %p, 0x%lx), cs: %d\n",
	      __func__, dev->name, bitlen, dout, din, flags, cs);

	mpi_cfg = octeon_spi_set_mpicfg(dev);

	mpi_cfg |= MPI_CFG_TRITX | MPI_CFG_LEGACY_DIS | MPI_CFG_CS_STICKY |
		   MPI_CFG_TB100_EN;

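	/* I/O mode: default is single bit, 2 selects dual, 3 selects quad */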
	mpi_cfg &= ~MPI_CFG_IOMODE;
	if (flags & (SPI_TX_DUAL | SPI_RX_DUAL))
		mpi_cfg |= FIELD_PREP(MPI_CFG_IOMODE, 2);
	if (flags & (SPI_TX_QUAD | SPI_RX_QUAD))
		mpi_cfg |= FIELD_PREP(MPI_CFG_IOMODE, 3);

	if (mpi_cfg != readq(base + MPI_CFG)) {
		writeq(mpi_cfg, base + MPI_CFG);
		mpi_cfg = readq(base + MPI_CFG);
		udelay(10);
	}

	debug("\n mpi_cfg upd %llx\n\n", mpi_cfg);

	/* Start by writing or reading 1024 bytes at a time. */
	while (len > 1024) {
		if (tx_data) {
			/* 8 bytes per iteration */
			for (i = 0; i < 128; i++) {
				wide_dat = get_unaligned((u64 *)tx_data);
				debug(" tx: %016llx \t",
				      (unsigned long long)wide_dat);
				if ((i % 4) == 3)
					debug("\n");
				tx_data += 8;
				writeq(wide_dat, base + MPI_WIDE_BUF(i));
			}
		}

		mpi_xmit = FIELD_PREP(MPI_XMIT_CSID, cs) | MPI_XMIT_LEAVECS |
			   FIELD_PREP(MPI_XMIT_TXNUM, tx_data ? 1024 : 0) |
			   FIELD_PREP(MPI_XMIT_TOTNUM, 1024);
		writeq(mpi_xmit, base + MPI_XMIT);

		octeon_spi_wait_ready(dev);

		debug("\n ");

		if (rx_data) {
			/* 8 bytes per iteration */
			for (i = 0; i < 128; i++) {
				wide_dat = readq(base + MPI_WIDE_BUF(i));
				debug(" rx: %016llx\t",
				      (unsigned long long)wide_dat);
				if ((i % 4) == 3)
					debug("\n");
				*(u64 *)rx_data = wide_dat;
				rx_data += 8;
			}
		}
		len -= 1024;
	}

	if (tx_data) {
		rem = len % 8;
		/* 8 bytes per iteration */
		for (i = 0; i < len / 8; i++) {
			wide_dat = get_unaligned((u64 *)tx_data);
			debug(" tx: %016llx \t",
			      (unsigned long long)wide_dat);
			if ((i % 4) == 3)
				debug("\n");
			tx_data += 8;
			writeq(wide_dat, base + MPI_WIDE_BUF(i));
		}
		if (rem) {
			memcpy(&wide_dat, tx_data, rem);
			debug(" rtx: %016llx\t", wide_dat);
			writeq(wide_dat, base + MPI_WIDE_BUF(i));
		}
	}

	mpi_xmit = FIELD_PREP(MPI_XMIT_CSID, cs) |
		   FIELD_PREP(MPI_XMIT_LEAVECS, !(flags & SPI_XFER_END)) |
		   FIELD_PREP(MPI_XMIT_TXNUM, tx_data ? len : 0) |
		   FIELD_PREP(MPI_XMIT_TOTNUM, len);
	writeq(mpi_xmit, base + MPI_XMIT);

	octeon_spi_wait_ready(dev);

	debug("\n ");

	if (rx_data) {
		rem = len % 8;
		/* 8 bytes per iteration */
		for (i = 0; i < len / 8; i++) {
			wide_dat = readq(base + MPI_WIDE_BUF(i));
			debug(" rx: %016llx\t",
			      (unsigned long long)wide_dat);
			if ((i % 4) == 3)
				debug("\n");
			*(u64 *)rx_data = wide_dat;
			rx_data += 8;
		}
		if (rem) {
			wide_dat = readq(base + MPI_WIDE_BUF(i));
			debug(" rrx: %016llx\t",
			      (unsigned long long)wide_dat);
			memcpy(rx_data, &wide_dat, rem);
			rx_data += rem;
		}
	}

	return 0;
}

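/**
 * Check whether a spi-mem operation can be handled
 *
 * @param slave	SPI slave
 * @param op	spi-mem operation to check
 *
 * @return true if the operation is supported, false otherwise
 */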
static bool octeon_spi_supports_op(struct spi_slave *slave,
				   const struct spi_mem_op *op)
{
	/* For now, support only the following combinations:
	 * 1-1-1
	 * 1-1-2 1-2-2
	 * 1-1-4 1-4-4
	 */
	if (op->cmd.buswidth != 1)
		return false;
	return true;
}

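/**
 * Execute a spi-mem operation
 *
 * The command, address/dummy and data phases are issued as separate
 * octeontx2_spi_xfer() calls, with dual/quad flags derived from the
 * buswidth of each phase.
 *
 * @param slave	SPI slave
 * @param op	spi-mem operation to execute
 *
 * @return 0 for success, negative error code otherwise
 */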
static int octeon_spi_exec_op(struct spi_slave *slave,
			      const struct spi_mem_op *op)
{
	unsigned long flags = SPI_XFER_BEGIN;
	const void *tx;
	void *rx;
	u8 opcode, *buf;
	u8 *addr;
	int i, temp, ret;

	if (op->cmd.buswidth != 1)
		return -ENOTSUPP;

	/* Send CMD */
	i = 0;
	opcode = op->cmd.opcode;

	if (!op->data.nbytes && !op->addr.nbytes && !op->dummy.nbytes)
		flags |= SPI_XFER_END;

	ret = octeontx2_spi_xfer(slave->dev, 8, (void *)&opcode, NULL, flags);
	if (ret < 0)
		return ret;

	/* Send Address and dummy */
	if (op->addr.nbytes) {
		/* Alloc buffer for address+dummy */
		buf = (u8 *)calloc(1, op->addr.nbytes + op->dummy.nbytes);
		if (!buf) {
			printf("%s Out of memory\n", __func__);
			return -ENOMEM;
		}
		addr = (u8 *)&op->addr.val;
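		/* Copy the address out MSB first, then append dummy bytes */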
		for (temp = 0; temp < op->addr.nbytes; temp++)
			buf[i++] = *(u8 *)(addr + op->addr.nbytes - 1 - temp);
		for (temp = 0; temp < op->dummy.nbytes; temp++)
			buf[i++] = 0xff;
		if (op->addr.buswidth == 2)
			flags |= SPI_RX_DUAL;
		if (op->addr.buswidth == 4)
			flags |= SPI_RX_QUAD;

		if (!op->data.nbytes)
			flags |= SPI_XFER_END;
		ret = octeontx2_spi_xfer(slave->dev, i * 8, (void *)buf, NULL,
					 flags);
		free(buf);
		if (ret < 0)
			return ret;
	}
	if (!op->data.nbytes)
		return 0;

	/* Send/Receive Data */
	flags |= SPI_XFER_END;
	if (op->data.buswidth == 2)
		flags |= SPI_RX_DUAL;
	if (op->data.buswidth == 4)
		flags |= SPI_RX_QUAD;

	rx = (op->data.dir == SPI_MEM_DATA_IN) ? op->data.buf.in : NULL;
	tx = (op->data.dir == SPI_MEM_DATA_OUT) ? op->data.buf.out : NULL;

	ret = octeontx2_spi_xfer(slave->dev, (op->data.nbytes * 8), tx, rx,
				 flags);
	return ret;
}

static const struct spi_controller_mem_ops octeontx2_spi_mem_ops = {
	.supports_op = octeon_spi_supports_op,
	.exec_op = octeon_spi_exec_op,
};

/**
 * Set the speed of the SPI bus
 *
 * @param bus	 bus to set
 * @param max_hz maximum speed supported
 */
static int octeon_spi_set_speed(struct udevice *bus, uint max_hz)
{
	struct octeon_spi *priv = dev_get_priv(bus);
	ulong clk_rate;
	u32 calc_hz;

	if (max_hz > OCTEON_SPI_MAX_CLOCK_HZ)
		max_hz = OCTEON_SPI_MAX_CLOCK_HZ;

	clk_rate = clk_get_rate(&priv->clk);
	if (IS_ERR_VALUE(clk_rate))
		return -EINVAL;

	debug("%s(%s, %u, %lu)\n", __func__, bus->name, max_hz, clk_rate);

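	/*
	 * The SPI clock is the input clock divided by 2 * CLKDIV; start
	 * with the ideal divisor and increase it until the resulting
	 * rate no longer exceeds max_hz.
	 */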
	priv->clkdiv = clk_rate / (2 * max_hz);
	while (1) {
		calc_hz = clk_rate / (2 * priv->clkdiv);
		if (calc_hz <= max_hz)
			break;
		priv->clkdiv += 1;
	}

	if (priv->clkdiv > 8191)
		return -EINVAL;

	debug("%s: clkdiv=%d\n", __func__, priv->clkdiv);

	return 0;
}

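/**
 * Set the SPI mode of the bus
 *
 * The mode is not latched here; it is taken from the slave's platform
 * data and applied per transfer in octeon_spi_set_mpicfg().
 *
 * @param bus	SPI bus
 * @param mode	SPI mode flags
 *
 * @return always 0
 */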
static int octeon_spi_set_mode(struct udevice *bus, uint mode)
{
	/* We don't set it here */
	return 0;
}

static struct dm_spi_ops octeon_spi_ops = {
	.claim_bus = octeon_spi_claim_bus,
	.release_bus = octeon_spi_release_bus,
	.set_speed = octeon_spi_set_speed,
	.set_mode = octeon_spi_set_mode,
	.xfer = octeon_spi_xfer,
};

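/**
 * Probe the SPI controller
 *
 * Octeon TX/TX2 devices are PCI based: the registers are mapped through
 * BAR0 plus a 0x1000 offset. MIPS Octeon uses the device tree address.
 * The controller clock is looked up and enabled in both cases.
 *
 * @param dev	SPI bus device
 *
 * @return 0 for success, negative error code otherwise
 */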
static int octeon_spi_probe(struct udevice *dev)
{
	struct octeon_spi *priv = dev_get_priv(dev);
	int ret;

	/* Octeon TX & TX2 use PCI based probing */
	if (device_is_compatible(dev, "cavium,thunder-8190-spi")) {
		pci_dev_t bdf = dm_pci_get_bdf(dev);

		debug("SPI PCI device: %x\n", bdf);
		priv->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
					    PCI_REGION_MEM);
		/* Add base offset */
		priv->base += 0x1000;

		/*
		 * Octeon TX2 needs a different xfer function and supports
		 * mem_ops
		 */
		if (device_is_compatible(dev, "cavium,thunderx-spi")) {
			octeon_spi_ops.xfer = octeontx2_spi_xfer;
			octeon_spi_ops.mem_ops = &octeontx2_spi_mem_ops;
		}
	} else {
		priv->base = dev_remap_addr(dev);
	}

	ret = clk_get_by_index(dev, 0, &priv->clk);
	if (ret < 0)
		return ret;

	ret = clk_enable(&priv->clk);
	if (ret)
		return ret;

	debug("SPI bus %s %d at %p\n", dev->name, dev->seq, priv->base);

	return 0;
}

static const struct udevice_id octeon_spi_ids[] = {
	/* MIPS Octeon */
	{ .compatible = "cavium,octeon-3010-spi" },
	/* ARM Octeon TX / TX2 */
	{ .compatible = "cavium,thunder-8190-spi" },
	{ }
};

U_BOOT_DRIVER(octeon_spi) = {
	.name = "spi_octeon",
	.id = UCLASS_SPI,
	.of_match = octeon_spi_ids,
	.probe = octeon_spi_probe,
	.priv_auto_alloc_size = sizeof(struct octeon_spi),
	.ops = &octeon_spi_ops,
};