blob: 55eb8a54f5946dd814d22b5916f44157a790218b [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Jaehoon Chung757bff42012-10-15 19:10:29 +00002/*
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
Jaehoon Chung757bff42012-10-15 19:10:29 +00006 */
7
Alexey Brodkin2a7a2102013-12-26 15:29:07 +04008#include <bouncebuf.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -07009#include <cpu_func.h>
Simon Glass1c87ffe2015-08-06 20:16:27 -060010#include <errno.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060011#include <log.h>
Jaehoon Chung757bff42012-10-15 19:10:29 +000012#include <malloc.h>
Simon Glasscf92e052015-09-02 17:24:58 -060013#include <memalign.h>
Jaehoon Chung757bff42012-10-15 19:10:29 +000014#include <mmc.h>
15#include <dwmmc.h>
Ley Foon Tan79975992018-12-20 17:55:41 +080016#include <wait_bit.h>
Simon Glass90526e92020-05-10 11:39:56 -060017#include <asm/cache.h>
Simon Glassc05ed002020-05-10 11:40:11 -060018#include <linux/delay.h>
Urja Rannikko2b157012019-05-13 13:25:27 +000019#include <power/regulator.h>
Jaehoon Chung757bff42012-10-15 19:10:29 +000020
21#define PAGE_SIZE 4096
22
/*
 * Internal DMA Controller (IDMAC) descriptor for 32-bit addressing mode.
 *
 * Descriptors are chained via des3 and handed to the controller by physical
 * address; each one is aligned to ARCH_DMA_MINALIGN so it can be flushed to
 * memory independently for the DMA engine.
 */
struct dwmci_idmac32 {
	u32 des0;	/* Control descriptor (OWN/FS/LD/CH flags) */
	u32 des1;	/* Buffer size */
	u32 des2;	/* Buffer physical address */
	u32 des3;	/* Next descriptor physical address */
} __aligned(ARCH_DMA_MINALIGN);
30
/*
 * Internal DMA Controller (IDMAC) descriptor for 64-bit addressing mode.
 *
 * Same chaining scheme as the 32-bit variant, but buffer and next-descriptor
 * pointers are split into low/high 32-bit halves.
 */
struct dwmci_idmac64 {
	u32 des0;	/* Control descriptor */
	u32 des1;	/* Reserved */
	u32 des2;	/* Buffer sizes */
	u32 des3;	/* Reserved */
	u32 des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32 des5;	/* Upper 32-bits of Buffer Address Pointer 1 */
	u32 des6;	/* Lower 32-bits of Next Descriptor Address */
	u32 des7;	/* Upper 32-bits of Next Descriptor Address */
} __aligned(ARCH_DMA_MINALIGN);
42
/*
 * Register offsets for DW MMC blocks with 32-bit IDMAC.
 * Selected at runtime in dwmci_init_dma() via host->regs.
 */
static const struct dwmci_idmac_regs dwmci_idmac_regs32 = {
	.dbaddrl = DWMCI_DBADDR,
	.idsts = DWMCI_IDSTS,
	.idinten = DWMCI_IDINTEN,
	.dscaddrl = DWMCI_DSCADDR,
	.bufaddrl = DWMCI_BUFADDR,
};
51
/*
 * Register offsets for DW MMC blocks with 64-bit IDMAC.
 * The upper-half registers (.*u) only exist in this mode.
 */
static const struct dwmci_idmac_regs dwmci_idmac_regs64 = {
	.dbaddrl = DWMCI_DBADDRL,
	.dbaddru = DWMCI_DBADDRU,
	.idsts = DWMCI_IDSTS64,
	.idinten = DWMCI_IDINTEN64,
	.dscaddrl = DWMCI_DSCADDRL,
	.dscaddru = DWMCI_DSCADDRU,
	.bufaddrl = DWMCI_BUFADDRL,
	.bufaddru = DWMCI_BUFADDRU,
};
63
Jaehoon Chung757bff42012-10-15 19:10:29 +000064static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
65{
66 unsigned long timeout = 1000;
67 u32 ctrl;
68
69 dwmci_writel(host, DWMCI_CTRL, value);
70
71 while (timeout--) {
72 ctrl = dwmci_readl(host, DWMCI_CTRL);
73 if (!(ctrl & DWMCI_RESET_ALL))
74 return 1;
75 }
76 return 0;
77}
78
Sam Protsenko61f47c82024-08-07 22:14:15 -050079static void dwmci_set_idma_desc32(struct dwmci_idmac32 *desc, u32 control,
80 u32 buf_size, u32 buf_addr)
Jaehoon Chung757bff42012-10-15 19:10:29 +000081{
Sam Protsenko61f47c82024-08-07 22:14:15 -050082 phys_addr_t desc_phys = virt_to_phys(desc);
83 u32 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac32);
Jaehoon Chung757bff42012-10-15 19:10:29 +000084
Sam Protsenko61f47c82024-08-07 22:14:15 -050085 desc->des0 = control;
86 desc->des1 = buf_size;
87 desc->des2 = buf_addr;
88 desc->des3 = next_desc_phys;
Jaehoon Chung757bff42012-10-15 19:10:29 +000089}
90
Sam Protsenkoe760a242024-08-07 22:14:16 -050091static void dwmci_set_idma_desc64(struct dwmci_idmac64 *desc, u32 control,
92 u32 buf_size, u64 buf_addr)
93{
94 phys_addr_t desc_phys = virt_to_phys(desc);
95 u64 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac64);
96
97 desc->des0 = control;
98 desc->des1 = 0;
99 desc->des2 = buf_size;
100 desc->des3 = 0;
101 desc->des4 = buf_addr & 0xffffffff;
102 desc->des5 = buf_addr >> 32;
103 desc->des6 = next_desc_phys & 0xffffffff;
104 desc->des7 = next_desc_phys >> 32;
105}
106
/*
 * Build the IDMAC descriptor chain for @data inside @cur_idmac.
 *
 * Each descriptor covers at most 8 blocks of the bounce buffer; the first
 * descriptor gets the FS (first segment) flag, the last gets LD (last
 * descriptor).  The buffer offset advances by PAGE_SIZE (4096) per
 * descriptor, i.e. 8 * 512 bytes.
 * NOTE(review): this assumes 512-byte blocks; a different data->blocksize
 * would make the offset disagree with cnt -- confirm against callers.
 *
 * The chain is flushed to memory at the end so the (non-coherent) DMA
 * engine sees the descriptors.
 */
static void dwmci_prepare_desc(struct dwmci_host *host, struct mmc_data *data,
			       void *cur_idmac, void *bounce_buffer)
{
	struct dwmci_idmac32 *desc32 = cur_idmac;
	struct dwmci_idmac64 *desc64 = cur_idmac;
	ulong data_start, data_end;
	unsigned int blk_cnt, i;

	data_start = (ulong)cur_idmac;
	blk_cnt = data->blocks;

	for (i = 0;; i++) {
		phys_addr_t buf_phys = virt_to_phys(bounce_buffer);
		unsigned int flags, cnt;

		/* OWN: descriptor belongs to DMA; CH: des3 is a chain link */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		if (i == 0)
			flags |= DWMCI_IDMAC_FS;
		if (blk_cnt <= 8) {
			/* Final descriptor: mark last and size the remainder */
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		/* Fill the descriptor matching the controller's IDMAC width */
		if (host->dma_64bit_address) {
			dwmci_set_idma_desc64(desc64, flags, cnt,
					      buf_phys + i * PAGE_SIZE);
			desc64++;
		} else {
			dwmci_set_idma_desc32(desc32, flags, cnt,
					      buf_phys + i * PAGE_SIZE);
			desc32++;
		}

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
	}

	if (host->dma_64bit_address)
		data_end = (ulong)desc64;
	else
		data_end = (ulong)desc32;
	/* Push the descriptor chain out of the cache for the DMA engine */
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));
}
152
/*
 * Program the controller for an IDMAC transfer of @data.
 *
 * Resets the FIFO, clears pending IDMAC interrupts, points the controller
 * at the descriptor list base, builds the descriptor chain, and finally
 * enables DMA in CTRL/BMOD and sets the block size/byte count.  The
 * register write order follows the hardware's required enable sequence.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       void *cur_idmac,
			       void *bounce_buffer)
{
	/* Low/high halves of the descriptor list base physical address */
	const u32 idmacl = virt_to_phys(cur_idmac) & 0xffffffff;
	const u32 idmacu = (u64)virt_to_phys(cur_idmac) >> 32;
	unsigned long ctrl;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, host->regs->idsts, 0xffffffff);

	dwmci_writel(host, host->regs->dbaddrl, idmacl);
	if (host->dma_64bit_address)
		dwmci_writel(host, host->regs->dbaddru, idmacu);

	dwmci_prepare_desc(host, data, cur_idmac, bounce_buffer);

	/* Enable internal DMA in the main control register ... */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	/* ... and the IDMAC itself (fixed burst) in BMOD */
	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
184
Heiko Stuebner05fa06b2018-09-21 10:59:45 +0200185static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
186{
187 u32 timeout = 20000;
188
189 *len = dwmci_readl(host, DWMCI_STATUS);
190 while (--timeout && (*len & bit)) {
191 udelay(200);
192 *len = dwmci_readl(host, DWMCI_STATUS);
193 }
194
195 if (!timeout) {
196 debug("%s: FIFO underflow timeout\n", __func__);
197 return -ETIMEDOUT;
198 }
199
200 return 0;
201}
202
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100203static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
204{
205 unsigned int timeout;
206
Kever Yangc077c052019-08-29 15:42:41 +0800207 timeout = size * 8; /* counting in bits */
208 timeout *= 10; /* wait 10 times as long */
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100209 timeout /= mmc->clock;
210 timeout /= mmc->bus_width;
211 timeout /= mmc->ddr_mode ? 2 : 1;
Kever Yangc077c052019-08-29 15:42:41 +0800212 timeout *= 1000; /* counting in msec */
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100213 timeout = (timeout < 1000) ? 1000 : timeout;
214
215 return timeout;
216}
217
/*
 * PIO (FIFO-mode) data mover, called from the dwmci_data_transfer() poll
 * loop with the current RINTSTS value in @mask.
 *
 * Reads drain the FIFO whenever RXDR or DTO is pending; writes fill the
 * FIFO up to its depth whenever TXDR is pending.  No-op when the host is
 * in DMA mode or the transfer size is zero.  Returns 0 or a negative
 * error from dwmci_fifo_ready().
 */
static int dwmci_data_transfer_fifo(struct dwmci_host *host,
				    struct mmc_data *data, u32 mask)
{
	/* FIFO depth in 32-bit words, recovered from the RX watermark */
	const u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
				 RX_WMARK_SHIFT) + 1) * 2;
	const u32 int_rx = mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO);
	const u32 int_tx = mask & DWMCI_INTMSK_TXDR;
	int ret = 0;
	u32 len = 0, size, i;
	u32 *buf;

	/* Transfer size in 32-bit words (FIFO accesses are word-wide) */
	size = (data->blocksize * data->blocks) / 4;
	if (!host->fifo_mode || !size)
		return 0;

	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	if (data->flags == MMC_DATA_READ && int_rx) {
		/* Ack the RX interrupts we are about to service */
		dwmci_writel(host, DWMCI_RINTSTS, int_rx);
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_EMPTY, &len);
			if (ret < 0)
				break;

			/* len := words currently available in the FIFO */
			len = (len >> DWMCI_FIFO_SHIFT) & DWMCI_FIFO_MASK;
			len = min(size, len);
			for (i = 0; i < len; i++)
				*buf++ = dwmci_readl(host, DWMCI_DATA);
			size = size > len ? (size - len) : 0;
		}
	} else if (data->flags == MMC_DATA_WRITE && int_tx) {
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_FULL, &len);
			if (ret < 0)
				break;

			/* len := free word slots remaining in the FIFO */
			len = fifo_depth - ((len >> DWMCI_FIFO_SHIFT) &
					    DWMCI_FIFO_MASK);
			len = min(size, len);
			for (i = 0; i < len; i++)
				dwmci_writel(host, DWMCI_DATA, *buf++);
			size = size > len ? (size - len) : 0;
		}
		/* Ack TXDR once the FIFO has been topped up */
		dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_TXDR);
	}

	return ret;
}
269
270static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
271{
272 struct mmc *mmc = host->mmc;
273 int ret = 0;
274 u32 timeout, mask, size;
275 ulong start = get_timer(0);
276
277 size = data->blocksize * data->blocks;
278 timeout = dwmci_get_timeout(mmc, size);
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100279
huang linf382eb82015-11-17 14:20:21 +0800280 for (;;) {
281 mask = dwmci_readl(host, DWMCI_RINTSTS);
282 /* Error during data transfer. */
283 if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
284 debug("%s: DATA ERROR!\n", __func__);
285 ret = -EINVAL;
286 break;
287 }
288
Sam Protsenko02529242024-08-07 22:14:12 -0500289 ret = dwmci_data_transfer_fifo(host, data, mask);
huang lina65f51b2015-11-17 14:20:22 +0800290
huang linf382eb82015-11-17 14:20:21 +0800291 /* Data arrived correctly. */
292 if (mask & DWMCI_INTMSK_DTO) {
293 ret = 0;
294 break;
295 }
296
297 /* Check for timeout. */
298 if (get_timer(start) > timeout) {
299 debug("%s: Timeout waiting for data!\n",
300 __func__);
Jaehoon Chung915ffa52016-07-19 16:33:36 +0900301 ret = -ETIMEDOUT;
huang linf382eb82015-11-17 14:20:21 +0800302 break;
303 }
304 }
305
306 dwmci_writel(host, DWMCI_RINTSTS, mask);
307
308 return ret;
309}
310
Sam Protsenko95e42a52024-08-07 22:14:13 -0500311static int dwmci_dma_transfer(struct dwmci_host *host, uint flags,
312 struct bounce_buffer *bbstate)
313{
314 int ret;
315 u32 mask, ctrl;
316
317 if (flags == MMC_DATA_READ)
318 mask = DWMCI_IDINTEN_RI;
319 else
320 mask = DWMCI_IDINTEN_TI;
321
Sam Protsenkoe760a242024-08-07 22:14:16 -0500322 ret = wait_for_bit_le32(host->ioaddr + host->regs->idsts,
Sam Protsenko95e42a52024-08-07 22:14:13 -0500323 mask, true, 1000, false);
324 if (ret)
325 debug("%s: DWMCI_IDINTEN mask 0x%x timeout\n", __func__, mask);
326
327 /* Clear interrupts */
Sam Protsenkoe760a242024-08-07 22:14:16 -0500328 dwmci_writel(host, host->regs->idsts, DWMCI_IDINTEN_MASK);
Sam Protsenko95e42a52024-08-07 22:14:13 -0500329
330 ctrl = dwmci_readl(host, DWMCI_CTRL);
331 ctrl &= ~DWMCI_DMA_EN;
332 dwmci_writel(host, DWMCI_CTRL, ctrl);
333
334 bounce_buffer_stop(bbstate);
335 return ret;
336}
337
Jaehoon Chung757bff42012-10-15 19:10:29 +0000338static int dwmci_set_transfer_mode(struct dwmci_host *host,
339 struct mmc_data *data)
340{
341 unsigned long mode;
342
343 mode = DWMCI_CMD_DATA_EXP;
344 if (data->flags & MMC_DATA_WRITE)
345 mode |= DWMCI_CMD_RW;
346
347 return mode;
348}
349
Sam Protsenko2015f242024-08-07 22:14:09 -0500350static void dwmci_wait_while_busy(struct dwmci_host *host, struct mmc_cmd *cmd)
351{
352 unsigned int timeout = 500; /* msec */
353 ulong start;
354
355 start = get_timer(0);
356 while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
357 if (get_timer(start) > timeout) {
358 debug("%s: Timeout on data busy, continue anyway\n",
359 __func__);
360 break;
361 }
362 }
363}
364
Sam Protsenkoe760a242024-08-07 22:14:16 -0500365static int dwmci_send_cmd_common(struct dwmci_host *host, struct mmc_cmd *cmd,
366 struct mmc_data *data, void *cur_idmac)
Simon Glass691272f2016-06-12 23:30:23 -0600367{
Sam Protsenkoe760a242024-08-07 22:14:16 -0500368 int ret, flags = 0, i;
Alexander Graf9b5b8b62016-03-04 01:09:52 +0100369 u32 retry = 100000;
Sam Protsenko95e42a52024-08-07 22:14:13 -0500370 u32 mask;
Alexey Brodkin2a7a2102013-12-26 15:29:07 +0400371 struct bounce_buffer bbstate;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000372
Sam Protsenko2015f242024-08-07 22:14:09 -0500373 dwmci_wait_while_busy(host, cmd);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000374 dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
375
Alexey Brodkin2a7a2102013-12-26 15:29:07 +0400376 if (data) {
huang lina65f51b2015-11-17 14:20:22 +0800377 if (host->fifo_mode) {
378 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
379 dwmci_writel(host, DWMCI_BYTCNT,
380 data->blocksize * data->blocks);
381 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
Alexey Brodkin2a7a2102013-12-26 15:29:07 +0400382 } else {
huang lina65f51b2015-11-17 14:20:22 +0800383 if (data->flags == MMC_DATA_READ) {
Marek Vasut6ad5aec2019-03-23 18:45:27 +0100384 ret = bounce_buffer_start(&bbstate,
385 (void*)data->dest,
huang lina65f51b2015-11-17 14:20:22 +0800386 data->blocksize *
387 data->blocks, GEN_BB_WRITE);
388 } else {
Marek Vasut6ad5aec2019-03-23 18:45:27 +0100389 ret = bounce_buffer_start(&bbstate,
390 (void*)data->src,
huang lina65f51b2015-11-17 14:20:22 +0800391 data->blocksize *
392 data->blocks, GEN_BB_READ);
393 }
Marek Vasut6ad5aec2019-03-23 18:45:27 +0100394
395 if (ret)
396 return ret;
397
huang lina65f51b2015-11-17 14:20:22 +0800398 dwmci_prepare_data(host, data, cur_idmac,
399 bbstate.bounce_buffer);
Alexey Brodkin2a7a2102013-12-26 15:29:07 +0400400 }
Alexey Brodkin2a7a2102013-12-26 15:29:07 +0400401 }
Jaehoon Chung757bff42012-10-15 19:10:29 +0000402
Jaehoon Chung757bff42012-10-15 19:10:29 +0000403 dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
404
405 if (data)
406 flags = dwmci_set_transfer_mode(host, data);
407
408 if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
John Keeping66d0b7e2021-12-07 16:09:35 +0000409 return -EBUSY;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000410
411 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
412 flags |= DWMCI_CMD_ABORT_STOP;
413 else
414 flags |= DWMCI_CMD_PRV_DAT_WAIT;
415
416 if (cmd->resp_type & MMC_RSP_PRESENT) {
417 flags |= DWMCI_CMD_RESP_EXP;
418 if (cmd->resp_type & MMC_RSP_136)
419 flags |= DWMCI_CMD_RESP_LENGTH;
420 }
421
422 if (cmd->resp_type & MMC_RSP_CRC)
423 flags |= DWMCI_CMD_CHECK_CRC;
424
425 flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
426
427 debug("Sending CMD%d\n",cmd->cmdidx);
428
429 dwmci_writel(host, DWMCI_CMD, flags);
430
431 for (i = 0; i < retry; i++) {
432 mask = dwmci_readl(host, DWMCI_RINTSTS);
433 if (mask & DWMCI_INTMSK_CDONE) {
434 if (!data)
435 dwmci_writel(host, DWMCI_RINTSTS, mask);
436 break;
437 }
438 }
439
Pavel Machekf33c9302014-09-05 12:49:48 +0200440 if (i == retry) {
Simon Glass1c87ffe2015-08-06 20:16:27 -0600441 debug("%s: Timeout.\n", __func__);
Jaehoon Chung915ffa52016-07-19 16:33:36 +0900442 return -ETIMEDOUT;
Pavel Machekf33c9302014-09-05 12:49:48 +0200443 }
Jaehoon Chung757bff42012-10-15 19:10:29 +0000444
445 if (mask & DWMCI_INTMSK_RTO) {
Pavel Machekf33c9302014-09-05 12:49:48 +0200446 /*
447 * Timeout here is not necessarily fatal. (e)MMC cards
448 * will splat here when they receive CMD55 as they do
449 * not support this command and that is exactly the way
450 * to tell them apart from SD cards. Thus, this output
451 * below shall be debug(). eMMC cards also do not favor
452 * CMD8, please keep that in mind.
453 */
454 debug("%s: Response Timeout.\n", __func__);
Jaehoon Chung915ffa52016-07-19 16:33:36 +0900455 return -ETIMEDOUT;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000456 } else if (mask & DWMCI_INTMSK_RE) {
Simon Glass1c87ffe2015-08-06 20:16:27 -0600457 debug("%s: Response Error.\n", __func__);
458 return -EIO;
Marek Vasut26cc40d2018-11-06 23:42:11 +0100459 } else if ((cmd->resp_type & MMC_RSP_CRC) &&
460 (mask & DWMCI_INTMSK_RCRC)) {
461 debug("%s: Response CRC Error.\n", __func__);
462 return -EIO;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000463 }
464
Jaehoon Chung757bff42012-10-15 19:10:29 +0000465 if (cmd->resp_type & MMC_RSP_PRESENT) {
466 if (cmd->resp_type & MMC_RSP_136) {
467 cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
468 cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
469 cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
470 cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
471 } else {
472 cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
473 }
474 }
475
476 if (data) {
huang lina65f51b2015-11-17 14:20:22 +0800477 ret = dwmci_data_transfer(host, data);
Sam Protsenko95e42a52024-08-07 22:14:13 -0500478 if (!host->fifo_mode)
479 ret = dwmci_dma_transfer(host, data->flags, &bbstate);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000480 }
481
482 udelay(100);
483
Marek Vasut9042d972015-07-27 22:39:38 +0200484 return ret;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000485}
486
/*
 * send_cmd entry point; signature differs between the driver-model
 * (CONFIG_DM_MMC) and legacy builds, but both resolve to the same body.
 *
 * Allocates one cache-aligned IDMAC descriptor per 8 blocks (the per-
 * descriptor limit in dwmci_prepare_desc()) of the matching 32/64-bit
 * descriptor type, then delegates to dwmci_send_cmd_common().
 */
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* Number of descriptors needed: one per 8 blocks (0 if no data) */
	const size_t buf_size = data ? DIV_ROUND_UP(data->blocks, 8) : 0;

	if (host->dma_64bit_address) {
		ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac64, idmac, buf_size);
		return dwmci_send_cmd_common(host, cmd, data, idmac);
	} else {
		ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac32, idmac, buf_size);
		return dwmci_send_cmd_common(host, cmd, data, idmac);
	}
}
508
Sam Protsenko454fda92024-08-07 22:14:11 -0500509static int dwmci_control_clken(struct dwmci_host *host, bool on)
510{
511 const u32 val = on ? DWMCI_CLKEN_ENABLE | DWMCI_CLKEN_LOW_PWR : 0;
512 const u32 cmd_only_clk = DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_UPD_CLK;
513 int timeout = 10000;
514 u32 status;
515
516 dwmci_writel(host, DWMCI_CLKENA, val);
517
518 /* Inform CIU */
519 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_START | cmd_only_clk);
520 do {
521 status = dwmci_readl(host, DWMCI_CMD);
522 if (timeout-- < 0) {
523 debug("%s: Timeout!\n", __func__);
524 return -ETIMEDOUT;
525 }
526 } while (status & DWMCI_CMD_START);
527
528 return 0;
529}
530
531/*
532 * Update the clock divider.
533 *
534 * To prevent a clock glitch keep the clock stopped during the update of
535 * clock divider and clock source.
536 */
537static int dwmci_update_div(struct dwmci_host *host, u32 div)
538{
539 int ret;
540
541 /* Disable clock */
542 ret = dwmci_control_clken(host, false);
543 if (ret)
544 return ret;
545
546 /* Set clock to desired speed */
547 dwmci_writel(host, DWMCI_CLKDIV, div);
548 dwmci_writel(host, DWMCI_CLKSRC, 0);
549
550 /* Enable clock */
551 return dwmci_control_clken(host, true);
552}
553
Jaehoon Chung757bff42012-10-15 19:10:29 +0000554static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
555{
Sam Protsenko454fda92024-08-07 22:14:11 -0500556 u32 div;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000557 unsigned long sclk;
Sam Protsenko454fda92024-08-07 22:14:11 -0500558 int ret;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000559
Amar9c50e352013-04-27 11:42:54 +0530560 if ((freq == host->clock) || (freq == 0))
Jaehoon Chung757bff42012-10-15 19:10:29 +0000561 return 0;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000562 /*
Pavel Machekf33c9302014-09-05 12:49:48 +0200563 * If host->get_mmc_clk isn't defined,
Jaehoon Chung757bff42012-10-15 19:10:29 +0000564 * then assume that host->bus_hz is source clock value.
Pavel Machekf33c9302014-09-05 12:49:48 +0200565 * host->bus_hz should be set by user.
Jaehoon Chung757bff42012-10-15 19:10:29 +0000566 */
Jaehoon Chungb44fe832013-10-06 18:59:31 +0900567 if (host->get_mmc_clk)
Simon Glasse3563f22015-08-30 16:55:15 -0600568 sclk = host->get_mmc_clk(host, freq);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000569 else if (host->bus_hz)
570 sclk = host->bus_hz;
571 else {
Simon Glass1c87ffe2015-08-06 20:16:27 -0600572 debug("%s: Didn't get source clock value.\n", __func__);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000573 return -EINVAL;
574 }
575
Chin Liang See6ace1532014-06-10 01:26:52 -0500576 if (sclk == freq)
577 div = 0; /* bypass mode */
578 else
579 div = DIV_ROUND_UP(sclk, 2 * freq);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000580
Sam Protsenko454fda92024-08-07 22:14:11 -0500581 ret = dwmci_update_div(host, div);
582 if (ret)
583 return ret;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000584
585 host->clock = freq;
586
587 return 0;
588}
589
/*
 * Apply the MMC core's I/O settings: clock frequency, bus width, DDR
 * mode, an optional board clock-select hook, and (when regulators are
 * enabled) the I/O voltage.  Signature differs between driver-model and
 * legacy builds.
 */
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	/* Translate bus width to the CTYPE register encoding */
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	/* Toggle DDR mode in UHS_REG, preserving the other bits */
	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	/* Board-specific clock selection hook, if provided */
	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		/* Disable before switching voltage to avoid glitching I/O */
		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, false);
		if (ret)
			return ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}
655
Sam Protsenko1db6dd12024-08-07 22:14:10 -0500656static void dwmci_init_fifo(struct dwmci_host *host)
657{
658 if (!host->fifoth_val) {
659 u32 fifo_size;
660
661 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
662 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
663 host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
664 TX_WMARK(fifo_size / 2);
665 }
666
667 dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
668}
669
Sam Protsenkoe760a242024-08-07 22:14:16 -0500670static void dwmci_init_dma(struct dwmci_host *host)
671{
672 int addr_config;
673
674 if (host->fifo_mode)
675 return;
676
677 addr_config = (dwmci_readl(host, DWMCI_HCON) >> 27) & 0x1;
678 if (addr_config == 1) {
679 host->dma_64bit_address = true;
680 host->regs = &dwmci_idmac_regs64;
681 debug("%s: IDMAC supports 64-bit address mode\n", __func__);
682 } else {
683 host->dma_64bit_address = false;
684 host->regs = &dwmci_idmac_regs32;
685 debug("%s: IDMAC supports 32-bit address mode\n", __func__);
686 }
687
688 dwmci_writel(host, host->regs->idinten, DWMCI_IDINTEN_MASK);
689}
690
/*
 * One-time controller bring-up: board hook, power-on, full reset,
 * 400 KHz enumeration clock, interrupt/timeout setup, FIFO and DMA init.
 * Returns 0 on success, -EIO if the controller fails to reset.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear all pending interrupts and mask everything */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	/* Maximum data/response timeout */
	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	/* Soft-reset the internal DMA controller */
	dwmci_writel(host, DWMCI_BMOD, 1);
	dwmci_init_fifo(host);
	dwmci_init_dma(host);

	/* Clock stays gated until dwmci_setup_bus() enables it */
	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
722
/*
 * Driver-model probe and ops table (CONFIG_DM_MMC), or the legacy
 * mmc_ops table otherwise.  Both route to the same dwmci_* handlers;
 * in the DM build dwmci_init() is called from probe instead of via ops.
 */
#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif
Pantelis Antoniouab769f22014-02-26 19:28:45 +0200743
/*
 * Fill an mmc_config from the host's settings: clock limits, supported
 * voltages, bus-width capabilities (8-bit and 4-bit are mutually
 * exclusive), and high-speed modes.  Called by board glue before
 * registering the controller.
 */
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	/* An 8-bit host is advertised as 8-bit only, otherwise 4-bit only */
	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}
769
/*
 * Registration entry points: with CONFIG_BLK the device is bound through
 * the block layer (dwmci_bind); legacy boards call add_dwmci() to set up
 * the config and create the mmc device directly.
 * add_dwmci() returns 0 on success, -1 if mmc_create() fails.
 */
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif