// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
 *
 * Author: SkyLake.Huang <skylake.huang@mediatek.com>
 */

#include <clk.h>
#include <cpu_func.h>
#include <div64.h>
#include <dm.h>
#include <spi.h>
#include <spi-mem.h>
#include <stdbool.h>
#include <watchdog.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/pinctrl.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/sizes.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_IRQ_REG			0x001c
#define SPI_STATUS_REG			0x0020
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICKDLY_OFFSET	29

#define SPI_CFG1_GET_TICKDLY_MASK	GENMASK(31, 29)
#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16
#define SPI_CFG2_SCK_HIGH_MASK		GENMASK(15, 0)
#define SPI_CFG2_SCK_LOW_MASK		GENMASK(31, 16)

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

#define PIN_MODE_CFG(x)			((x) / 2)

#define SPI_CFG3_IPM_PIN_MODE_OFFSET	0
#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET 16

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)
#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK	GENMASK(19, 16)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_32BITS_MASK		0xffffffff

#define DMA_ADDR_EXT_BITS		36
#define DMA_ADDR_DEF_BITS		32

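/*
 * Convert a cycle count at @freq Hz into microseconds, rounding up
 * (e.g. 100 cycles at 26 MHz -> 4 us).
 */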
#define CLK_TO_US(freq, clkcnt) DIV_ROUND_UP((clkcnt), (freq) / 1000000)

/* struct mtk_spim_capability
 * @enhance_timing: Some IC designs adjust the cfg registers to improve timing accuracy
 * @dma_ext: Some ICs support DMA address extension (beyond 32 bits)
 * @ipm_design: The IPM IP design improves on several features and adds dual/quad mode support
 * @support_quad: Whether quad mode is supported
 */
struct mtk_spim_capability {
	bool enhance_timing;
	bool dma_ext;
	bool ipm_design;
	bool support_quad;
};

/* struct mtk_spim_priv
 * @base: Base address of the spi controller
 * @state: Controller state
 * @sel_clk: Pad clock
 * @spi_clk: Core clock
 * @xfer_len: Current length of data for transfer
 * @hw_cap: Controller capabilities
 * @tick_dly: Used to postpone the SPI sampling time
 * @sample_sel: Sample edge of MISO
 * @dev: udevice of this spi controller
 * @tx_dma: TX DMA address
 * @rx_dma: RX DMA address
 */
struct mtk_spim_priv {
	void __iomem *base;
	u32 state;
	struct clk sel_clk, spi_clk;
	u32 xfer_len;
	struct mtk_spim_capability hw_cap;
	u32 tick_dly;
	u32 sample_sel;

	struct udevice *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static void mtk_spim_reset(struct mtk_spim_priv *priv)
{
	/* set the software reset bit in SPI_CMD_REG. */
	setbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
	clrbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
}

static int mtk_spim_hw_init(struct spi_slave *slave)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u16 cpha, cpol;
	u32 reg_val;

	cpha = slave->mode & SPI_CPHA ? 1 : 0;
	cpol = slave->mode & SPI_CPOL ? 1 : 0;

	if (priv->hw_cap.enhance_timing) {
		if (priv->hw_cap.ipm_design) {
			/* The CFG3 register is only used by spi-mem ops;
			 * reset it to its default value here.
			 */
			writel(0x0, priv->base + SPI_CFG3_IPM_REG);
			clrsetbits_le32(priv->base + SPI_CMD_REG,
					SPI_CMD_IPM_GET_TICKDLY_MASK,
					priv->tick_dly <<
					SPI_CMD_IPM_GET_TICKDLY_OFFSET);
		} else {
			clrsetbits_le32(priv->base + SPI_CFG1_REG,
					SPI_CFG1_GET_TICKDLY_MASK,
					priv->tick_dly <<
					SPI_CFG1_GET_TICKDLY_OFFSET);
		}
	}

	reg_val = readl(priv->base + SPI_CMD_REG);
	if (priv->hw_cap.ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (slave->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the TX/RX bit order (MSB or LSB first) */
	if (slave->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* do not reverse tx/rx endian */
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;

	if (priv->hw_cap.enhance_timing) {
		/* set CS polarity */
		if (slave->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (priv->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, priv->base + SPI_CMD_REG);

	return 0;
}

static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
				      u32 speed_hz)
{
	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;

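	/*
	 * SCK is derived from the core clock: sck_time counts the core
	 * clock ticks spent in each half period of SCK, so the resulting
	 * bus clock is roughly spi_clk_hz / (2 * sck_time). The divider
	 * is clamped at 4, i.e. the fastest bus clock is spi_clk_hz / 4.
	 */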
	spi_clk_hz = clk_get_rate(&priv->spi_clk);
	if (speed_hz <= spi_clk_hz / 4)
		div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
	else
		div = 4;

	sck_time = (div + 1) / 2;
	cs_time = sck_time * 2;

	if (priv->hw_cap.enhance_timing) {
		reg_val = ((sck_time - 1) & 0xffff)
			  << SPI_CFG2_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xffff)
			  << SPI_CFG2_SCK_LOW_OFFSET;
		writel(reg_val, priv->base + SPI_CFG2_REG);

		reg_val = ((cs_time - 1) & 0xffff)
			  << SPI_ADJUST_CFG0_CS_HOLD_OFFSET;
		reg_val |= ((cs_time - 1) & 0xffff)
			  << SPI_ADJUST_CFG0_CS_SETUP_OFFSET;
		writel(reg_val, priv->base + SPI_CFG0_REG);
	} else {
		reg_val = ((sck_time - 1) & 0xff)
			  << SPI_CFG0_SCK_HIGH_OFFSET;
		reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
		reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET;
		reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET;
		writel(reg_val, priv->base + SPI_CFG0_REG);
	}

	reg_val = readl(priv->base + SPI_CFG1_REG);
	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
	reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET;
	writel(reg_val, priv->base + SPI_CFG1_REG);
}

/**
 * mtk_spim_setup_packet() - setup packet format.
 * @priv: controller priv
 *
 * This controller sends/receives data in packets. The packet size is
 * configurable.
 *
 * This function calculates the maximum packet size available for the
 * current data, and the number of packets required to send/receive as
 * much data as possible.
 */
static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
{
	u32 packet_size, packet_loop, reg_val;

	/* Calculate maximum packet size */
	if (priv->hw_cap.ipm_design)
		packet_size = min_t(u32,
				    priv->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    priv->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	/* Calculate the number of packets to send/receive */
	packet_loop = priv->xfer_len / packet_size;

	reg_val = readl(priv->base + SPI_CFG1_REG);
	if (priv->hw_cap.ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;

	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;

	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;

	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;

	writel(reg_val, priv->base + SPI_CFG1_REG);
}

static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
{
	u32 cmd;

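	/* Kick off a new transfer when idle; otherwise resume a paused one */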
	cmd = readl(priv->base + SPI_CMD_REG);
	if (priv->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, priv->base + SPI_CMD_REG);
}

static bool mtk_spim_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);

	if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
	    op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
	    op->data.buswidth > 4)
		return false;

	if (!priv->hw_cap.support_quad && (op->cmd.buswidth > 2 ||
	    op->addr.buswidth > 2 || op->dummy.buswidth > 2 ||
	    op->data.buswidth > 2))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

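	/*
	 * Transfers larger than one packet (64 KiB) must be an exact
	 * multiple of the packet size and must fit in the packet loop
	 * counter (256 packets, i.e. 16 MiB in total).
	 */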
	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
				    const struct spi_mem_op *op)
{
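	/*
	 * Program the lower 32 bits of the DMA addresses; controllers
	 * with extended (36-bit) DMA addressing take the upper bits in
	 * the *_REG_64 registers.
	 */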
	writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
	       priv->base + SPI_TX_SRC_REG);

	if (priv->hw_cap.dma_ext)
		writel((u32)(priv->tx_dma >> 32),
		       priv->base + SPI_TX_SRC_REG_64);

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
		       priv->base + SPI_RX_DST_REG);

		if (priv->hw_cap.dma_ext)
			writel((u32)(priv->rx_dma >> 32),
			       priv->base + SPI_RX_DST_REG_64);
	}
}

static int mtk_spim_transfer_wait(struct spi_slave *slave,
				  const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 sck_l, sck_h, spi_bus_clk, clk_count, reg;
	ulong us = 1;
	int ret = 0;

	if (op->data.dir == SPI_MEM_NO_DATA)
		clk_count = 32;
	else
		clk_count = op->data.nbytes;

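	/*
	 * Estimate the transfer time from the effective bus clock, which
	 * is the core clock divided by the programmed SCK high + low
	 * counts, then add a generous 1 s tolerance before timing out.
	 */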
	spi_bus_clk = clk_get_rate(&priv->spi_clk);
	sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
	sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
	do_div(spi_bus_clk, sck_l + sck_h + 2);

	us = CLK_TO_US(spi_bus_clk, clk_count * 8);
	us += 1000 * 1000; /* 1s tolerance */

	if (us > UINT_MAX)
		us = UINT_MAX;

	ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
				 reg & 0x1, us);
	if (ret < 0) {
		dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
		return -ETIMEDOUT;
	}

	return 0;
}

static int mtk_spim_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct udevice *bus = dev_get_parent(slave->dev);
	struct mtk_spim_priv *priv = dev_get_priv(bus);
	u32 reg_val, nio = 1, tx_size;
	char *tx_tmp_buf;
	char *rx_tmp_buf;
	int i, ret = 0;

	mtk_spim_reset(priv);
	mtk_spim_hw_init(slave);
	mtk_spim_prepare_transfer(priv, slave->max_hz);

	reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			    SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (!op->data.nbytes) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, priv->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		priv->xfer_len = op->data.nbytes;
		mtk_spim_setup_packet(priv);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

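	/*
	 * Select the pin mode from the widest bus width used by the
	 * address/dummy/data phases: 1 (single), 2 (dual) or 4 (quad).
	 */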
	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, priv->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

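	/*
	 * Pad the TX buffer to at least 32 bytes; this appears to match
	 * the controller FIFO size (MTK_SPI_MAX_FIFO_SIZE) and keeps the
	 * DMA length at the hardware minimum.
	 */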
	tx_size = max(tx_size, (u32)32);

	/* Fill up tx data */
	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
	if (!tx_tmp_buf) {
		ret = -ENOMEM;
		goto exit;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

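	/* The address is sent most-significant byte first */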
	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out, op->data.nbytes);
	/* Finish filling up tx data */

	priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, priv->tx_dma)) {
		ret = -ENOMEM;
		goto tx_free;
	}

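	/*
	 * DMA needs a 4-byte aligned RX buffer. If the caller's buffer
	 * is not aligned, receive into a temporary bounce buffer and
	 * copy the data back after the transfer completes.
	 */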
	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto tx_unmap;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, priv->rx_dma)) {
			ret = -ENOMEM;
			goto rx_free;
		}
	}

	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;

	writel(reg_val, priv->base + SPI_CMD_REG);

	mtk_spim_setup_dma_xfer(priv, op);

	mtk_spim_enable_transfer(priv);

	/* Wait for the transfer to complete. */
	ret = mtk_spim_transfer_wait(slave, op);
	if (ret)
		goto rx_unmap;

	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);

rx_unmap:
	/* spi disable dma */
	reg_val = readl(priv->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, priv->base + SPI_CMD_REG);

	writel(0, priv->base + SPI_TX_SRC_REG);
	writel(0, priv->base + SPI_RX_DST_REG);

	if (op->data.dir == SPI_MEM_DATA_IN)
		dma_unmap_single(priv->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
rx_free:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
tx_unmap:
	dma_unmap_single(priv->tx_dma,
			 tx_size, DMA_TO_DEVICE);
tx_free:
	kfree(tx_tmp_buf);
exit:
	return ret;
}

static int mtk_spim_adjust_op_size(struct spi_slave *slave,
				   struct spi_mem_op *op)
{
	int opcode_len;

	if (!op->data.nbytes)
		return 0;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

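/*
 * Illustrative device tree node for this driver (a sketch only; the
 * clock phandles and property values below are board-specific
 * assumptions, not taken from a real board file):
 *
 *	spi@1100a000 {
 *		compatible = "mediatek,ipm-spi";
 *		reg = <0x1100a000 0x100>;
 *		clocks = <&topckgen CLK_TOP_SPI_SEL>, <&infracfg CLK_IFR_SPI0>;
 *		clock-names = "sel-clk", "spi-clk";
 *		ipm_design;
 *		support_quad;
 *		enhance_timing;
 *		tick_dly = <2>;
 *		sample_sel = <0>;
 *	};
 */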
static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
{
	int ret;

	priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
	priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
	priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
	priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");

	ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
	if (ret < 0)
		dev_err(priv->dev, "tick dly not set.\n");

	ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
	if (ret < 0)
		dev_err(priv->dev, "sample sel not set.\n");

	return ret;
}

static int mtk_spim_probe(struct udevice *dev)
{
	struct mtk_spim_priv *priv = dev_get_priv(dev);
	int ret;

	priv->base = devfdt_get_addr_ptr(dev);
	if (!priv->base)
		return -EINVAL;

	priv->dev = dev;

	mtk_spim_get_attr(priv, dev);

	ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get sel-clk\n");
		return ret;
	}

	ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
	if (ret < 0) {
		dev_err(dev, "failed to get spi-clk\n");
		return ret;
	}

	clk_enable(&priv->sel_clk);
	clk_enable(&priv->spi_clk);

	return 0;
}

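/*
 * Clock and mode are (re)programmed for each spi-mem operation in
 * mtk_spim_exec_op(), so these uclass hooks have nothing to do.
 */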
static int mtk_spim_set_speed(struct udevice *dev, uint speed)
{
	return 0;
}

static int mtk_spim_set_mode(struct udevice *dev, uint mode)
{
	return 0;
}

static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
	.adjust_op_size = mtk_spim_adjust_op_size,
	.supports_op = mtk_spim_supports_op,
	.exec_op = mtk_spim_exec_op
};

static const struct dm_spi_ops mtk_spim_ops = {
	.mem_ops = &mtk_spim_mem_ops,
	.set_speed = mtk_spim_set_speed,
	.set_mode = mtk_spim_set_mode,
};

static const struct udevice_id mtk_spim_ids[] = {
	{ .compatible = "mediatek,ipm-spi" },
	{}
};

U_BOOT_DRIVER(mtk_spim) = {
	.name = "mtk_spim",
	.id = UCLASS_SPI,
	.of_match = mtk_spim_ids,
	.ops = &mtk_spim_ops,
	.priv_auto = sizeof(struct mtk_spim_priv),
	.probe = mtk_spim_probe,
};