blob: 3e7073f7de1bb55510016362990b9b45b9b412c1 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Jaehoon Chung757bff42012-10-15 19:10:29 +00002/*
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
Jaehoon Chung757bff42012-10-15 19:10:29 +00006 */
7
Alexey Brodkin2a7a2102013-12-26 15:29:07 +04008#include <bouncebuf.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -07009#include <cpu_func.h>
Simon Glass1c87ffe2015-08-06 20:16:27 -060010#include <errno.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060011#include <log.h>
Jaehoon Chung757bff42012-10-15 19:10:29 +000012#include <malloc.h>
Simon Glasscf92e052015-09-02 17:24:58 -060013#include <memalign.h>
Jaehoon Chung757bff42012-10-15 19:10:29 +000014#include <mmc.h>
15#include <dwmmc.h>
Ley Foon Tan79975992018-12-20 17:55:41 +080016#include <wait_bit.h>
Simon Glass90526e92020-05-10 11:39:56 -060017#include <asm/cache.h>
Simon Glassc05ed002020-05-10 11:40:11 -060018#include <linux/delay.h>
Urja Rannikko2b157012019-05-13 13:25:27 +000019#include <power/regulator.h>
Jaehoon Chung757bff42012-10-15 19:10:29 +000020
21#define PAGE_SIZE 4096
22
/*
 * Internal DMA Controller (IDMAC) descriptor for 32-bit addressing mode.
 *
 * Descriptors are chained via des3 (see dwmci_set_idma_desc32()) and are
 * handed to the hardware, so each one is aligned to ARCH_DMA_MINALIGN to
 * allow safe cache maintenance on a per-descriptor basis.
 */
struct dwmci_idmac32 {
	u32 des0;	/* Control descriptor (OWN/CH/FS/LD flags) */
	u32 des1;	/* Buffer size in bytes */
	u32 des2;	/* Buffer physical address */
	u32 des3;	/* Next descriptor physical address */
} __aligned(ARCH_DMA_MINALIGN);
30
Jaehoon Chung757bff42012-10-15 19:10:29 +000031static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
32{
33 unsigned long timeout = 1000;
34 u32 ctrl;
35
36 dwmci_writel(host, DWMCI_CTRL, value);
37
38 while (timeout--) {
39 ctrl = dwmci_readl(host, DWMCI_CTRL);
40 if (!(ctrl & DWMCI_RESET_ALL))
41 return 1;
42 }
43 return 0;
44}
45
Sam Protsenko61f47c82024-08-07 22:14:15 -050046static void dwmci_set_idma_desc32(struct dwmci_idmac32 *desc, u32 control,
47 u32 buf_size, u32 buf_addr)
Jaehoon Chung757bff42012-10-15 19:10:29 +000048{
Sam Protsenko61f47c82024-08-07 22:14:15 -050049 phys_addr_t desc_phys = virt_to_phys(desc);
50 u32 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac32);
Jaehoon Chung757bff42012-10-15 19:10:29 +000051
Sam Protsenko61f47c82024-08-07 22:14:15 -050052 desc->des0 = control;
53 desc->des1 = buf_size;
54 desc->des2 = buf_addr;
55 desc->des3 = next_desc_phys;
Jaehoon Chung757bff42012-10-15 19:10:29 +000056}
57
/*
 * Build the IDMAC descriptor chain for one data transfer.
 *
 * The bounce buffer is carved into chunks of up to 8 blocks; one
 * descriptor is emitted per chunk.  The first descriptor gets the FS
 * (first segment) flag, the final one LD (last descriptor).  The whole
 * chain is then flushed from the data cache so the DMA engine sees it.
 *
 * NOTE(review): the per-descriptor buffer stride is PAGE_SIZE (4096),
 * which equals the 8-block chunk size only when blocksize == 512 —
 * presumably all callers use 512-byte blocks; verify for other sizes.
 */
static void dwmci_prepare_desc(struct mmc_data *data,
			       struct dwmci_idmac32 *cur_idmac,
			       void *bounce_buffer)
{
	struct dwmci_idmac32 *desc32 = cur_idmac;
	ulong data_start, data_end;
	unsigned int blk_cnt, i;

	data_start = (ulong)cur_idmac;
	blk_cnt = data->blocks;

	for (i = 0;; i++) {
		phys_addr_t buf_phys = virt_to_phys(bounce_buffer);
		unsigned int flags, cnt;

		/* OWN: hardware owns it; CH: des3 chains to the next one */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		if (i == 0)
			flags |= DWMCI_IDMAC_FS;
		if (blk_cnt <= 8) {
			/* Final chunk: mark last descriptor, partial length */
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc32(desc32, flags, cnt,
				      buf_phys + i * PAGE_SIZE);
		desc32++;

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
	}

	/* Make the descriptor chain visible to the IDMAC before DMA starts */
	data_end = (ulong)desc32;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));
}
94
/*
 * Program the controller for an IDMAC (internal DMA) data transfer:
 * reset the FIFO, point the controller at the descriptor chain, enable
 * DMA in CTRL/BMOD and set the transfer geometry.  The register write
 * order follows the hardware's expected setup sequence.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac32 *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	/* Descriptor chain base address (physical == virtual here) */
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	dwmci_prepare_desc(data, cur_idmac, bounce_buffer);

	/* Enable DMA on the host controller side */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	/* Enable the IDMAC itself (fixed burst) */
	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
122
Heiko Stuebner05fa06b2018-09-21 10:59:45 +0200123static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
124{
125 u32 timeout = 20000;
126
127 *len = dwmci_readl(host, DWMCI_STATUS);
128 while (--timeout && (*len & bit)) {
129 udelay(200);
130 *len = dwmci_readl(host, DWMCI_STATUS);
131 }
132
133 if (!timeout) {
134 debug("%s: FIFO underflow timeout\n", __func__);
135 return -ETIMEDOUT;
136 }
137
138 return 0;
139}
140
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100141static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
142{
143 unsigned int timeout;
144
Kever Yangc077c052019-08-29 15:42:41 +0800145 timeout = size * 8; /* counting in bits */
146 timeout *= 10; /* wait 10 times as long */
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100147 timeout /= mmc->clock;
148 timeout /= mmc->bus_width;
149 timeout /= mmc->ddr_mode ? 2 : 1;
Kever Yangc077c052019-08-29 15:42:41 +0800150 timeout *= 1000; /* counting in msec */
Marek Vasut4e16f0a2019-03-23 03:32:24 +0100151 timeout = (timeout < 1000) ? 1000 : timeout;
152
153 return timeout;
154}
155
/*
 * Service the data FIFO in PIO (fifo_mode) transfers.
 *
 * Called repeatedly from the dwmci_data_transfer() poll loop with the
 * current raw interrupt status in @mask.  Drains the RX FIFO on reads
 * (when RXDR or DTO is pending) or fills the TX FIFO on writes (when
 * TXDR is pending), 32 bits at a time.  A no-op when the host is in DMA
 * mode or the transfer length is zero.
 *
 * Returns 0 on success or a negative error from dwmci_fifo_ready().
 *
 * NOTE(review): transfers are handled in whole 32-bit words
 * (size = bytes / 4); presumably blocksize is always a multiple of 4 —
 * verify against callers.
 */
static int dwmci_data_transfer_fifo(struct dwmci_host *host,
				    struct mmc_data *data, u32 mask)
{
	/* FIFO depth in words, recovered from the RX watermark setting */
	const u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
				 RX_WMARK_SHIFT) + 1) * 2;
	const u32 int_rx = mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO);
	const u32 int_tx = mask & DWMCI_INTMSK_TXDR;
	int ret = 0;
	u32 len = 0, size, i;
	u32 *buf;

	size = (data->blocksize * data->blocks) / 4;	/* length in words */
	if (!host->fifo_mode || !size)
		return 0;

	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	if (data->flags == MMC_DATA_READ && int_rx) {
		/* Ack the RX interrupts we are about to service */
		dwmci_writel(host, DWMCI_RINTSTS, int_rx);
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_EMPTY, &len);
			if (ret < 0)
				break;

			/* len = number of words currently in the FIFO */
			len = (len >> DWMCI_FIFO_SHIFT) & DWMCI_FIFO_MASK;
			len = min(size, len);
			for (i = 0; i < len; i++)
				*buf++ = dwmci_readl(host, DWMCI_DATA);
			size = size > len ? (size - len) : 0;
		}
	} else if (data->flags == MMC_DATA_WRITE && int_tx) {
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_FULL, &len);
			if (ret < 0)
				break;

			/* len = free space in the FIFO, in words */
			len = fifo_depth - ((len >> DWMCI_FIFO_SHIFT) &
					    DWMCI_FIFO_MASK);
			len = min(size, len);
			for (i = 0; i < len; i++)
				dwmci_writel(host, DWMCI_DATA, *buf++);
			size = size > len ? (size - len) : 0;
		}
		dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_TXDR);
	}

	return ret;
}
207
/*
 * Wait for a data transfer to complete, servicing the FIFO in PIO mode
 * along the way.
 *
 * Polls the raw interrupt status until DTO (data transfer over), a data
 * error, or a computed timeout.  The final status is written back to
 * RINTSTS to acknowledge whatever bits were last observed.
 *
 * Returns 0 on success, -EINVAL on a data error, -ETIMEDOUT on timeout,
 * or a negative error from the FIFO path.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size;
	ulong start = get_timer(0);

	size = data->blocksize * data->blocks;
	timeout = dwmci_get_timeout(mmc, size);

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		/* PIO mode: move words in/out of the FIFO (no-op for DMA) */
		ret = dwmci_data_transfer_fifo(host, data, mask);

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* Acknowledge the interrupt bits we last saw */
	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
248
/*
 * Finish an IDMAC transfer: wait for the receive/transmit-done bit in
 * IDSTS, then tear down DMA and release the bounce buffer.
 *
 * @flags:   MMC_DATA_READ or MMC_DATA_WRITE, selects which IDMAC
 *           completion bit to wait for
 * @bbstate: bounce buffer to stop (copies data back on reads)
 *
 * Returns 0 on success or the wait_for_bit_le32() error on timeout.
 * Note: cleanup (interrupt ack, DMA disable, bounce-buffer stop) runs
 * even on timeout, so the controller is left in a sane state.
 */
static int dwmci_dma_transfer(struct dwmci_host *host, uint flags,
			      struct bounce_buffer *bbstate)
{
	int ret;
	u32 mask, ctrl;

	if (flags == MMC_DATA_READ)
		mask = DWMCI_IDINTEN_RI;
	else
		mask = DWMCI_IDINTEN_TI;

	ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
				mask, true, 1000, false);
	if (ret)
		debug("%s: DWMCI_IDINTEN mask 0x%x timeout\n", __func__, mask);

	/* Clear interrupts */
	dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

	/* Disable DMA on the host side */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl &= ~DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	bounce_buffer_stop(bbstate);
	return ret;
}
275
Jaehoon Chung757bff42012-10-15 19:10:29 +0000276static int dwmci_set_transfer_mode(struct dwmci_host *host,
277 struct mmc_data *data)
278{
279 unsigned long mode;
280
281 mode = DWMCI_CMD_DATA_EXP;
282 if (data->flags & MMC_DATA_WRITE)
283 mode |= DWMCI_CMD_RW;
284
285 return mode;
286}
287
/*
 * Wait for the card/data path to go idle before issuing a command.
 * Gives up after 500 ms but deliberately continues anyway (best effort),
 * matching the original behaviour of the command path.
 */
static void dwmci_wait_while_busy(struct dwmci_host *host, struct mmc_cmd *cmd)
{
	unsigned int timeout = 500; /* msec */
	ulong start;

	start = get_timer(0);
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy, continue anyway\n",
			      __func__);
			break;
		}
	}
}
302
/*
 * Send one MMC command (and optional data transfer) to the controller.
 *
 * Dual entry point: under CONFIG_DM_MMC the DM callback signature takes
 * a udevice; otherwise the legacy signature takes the mmc directly.
 *
 * Sequence: wait for the card to go idle, set up the data path (PIO
 * registers, or bounce buffer + IDMAC descriptors for DMA), build the
 * CMD register flags, issue the command, poll for command-done, decode
 * response/error status, then run the data phase.
 *
 * Returns 0 on success or a negative errno (-EBUSY, -ETIMEDOUT, -EIO,
 * or a data-phase error).
 */
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One 32-bit IDMAC descriptor per 8 blocks (DMA mode only) */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac32, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	u32 retry = 100000;
	u32 mask;
	struct bounce_buffer bbstate;

	dwmci_wait_while_busy(host, cmd);
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: just program geometry and reset the FIFO */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage data through a cache-aligned bounce buffer */
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* R2-with-busy is not a valid combination for this controller */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -EBUSY;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Poll for command-done; for data commands the ack happens later */
	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			/* 136-bit responses are read most-significant first */
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);
		/* DMA mode: also wait for IDMAC completion and clean up */
		if (!host->fifo_mode)
			ret = dwmci_dma_transfer(host, data->flags, &bbstate);
	}

	udelay(100);

	return ret;
}
434
Sam Protsenko454fda92024-08-07 22:14:11 -0500435static int dwmci_control_clken(struct dwmci_host *host, bool on)
436{
437 const u32 val = on ? DWMCI_CLKEN_ENABLE | DWMCI_CLKEN_LOW_PWR : 0;
438 const u32 cmd_only_clk = DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_UPD_CLK;
439 int timeout = 10000;
440 u32 status;
441
442 dwmci_writel(host, DWMCI_CLKENA, val);
443
444 /* Inform CIU */
445 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_START | cmd_only_clk);
446 do {
447 status = dwmci_readl(host, DWMCI_CMD);
448 if (timeout-- < 0) {
449 debug("%s: Timeout!\n", __func__);
450 return -ETIMEDOUT;
451 }
452 } while (status & DWMCI_CMD_START);
453
454 return 0;
455}
456
457/*
458 * Update the clock divider.
459 *
460 * To prevent a clock glitch keep the clock stopped during the update of
461 * clock divider and clock source.
462 */
463static int dwmci_update_div(struct dwmci_host *host, u32 div)
464{
465 int ret;
466
467 /* Disable clock */
468 ret = dwmci_control_clken(host, false);
469 if (ret)
470 return ret;
471
472 /* Set clock to desired speed */
473 dwmci_writel(host, DWMCI_CLKDIV, div);
474 dwmci_writel(host, DWMCI_CLKSRC, 0);
475
476 /* Enable clock */
477 return dwmci_control_clken(host, true);
478}
479
Jaehoon Chung757bff42012-10-15 19:10:29 +0000480static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
481{
Sam Protsenko454fda92024-08-07 22:14:11 -0500482 u32 div;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000483 unsigned long sclk;
Sam Protsenko454fda92024-08-07 22:14:11 -0500484 int ret;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000485
Amar9c50e352013-04-27 11:42:54 +0530486 if ((freq == host->clock) || (freq == 0))
Jaehoon Chung757bff42012-10-15 19:10:29 +0000487 return 0;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000488 /*
Pavel Machekf33c9302014-09-05 12:49:48 +0200489 * If host->get_mmc_clk isn't defined,
Jaehoon Chung757bff42012-10-15 19:10:29 +0000490 * then assume that host->bus_hz is source clock value.
Pavel Machekf33c9302014-09-05 12:49:48 +0200491 * host->bus_hz should be set by user.
Jaehoon Chung757bff42012-10-15 19:10:29 +0000492 */
Jaehoon Chungb44fe832013-10-06 18:59:31 +0900493 if (host->get_mmc_clk)
Simon Glasse3563f22015-08-30 16:55:15 -0600494 sclk = host->get_mmc_clk(host, freq);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000495 else if (host->bus_hz)
496 sclk = host->bus_hz;
497 else {
Simon Glass1c87ffe2015-08-06 20:16:27 -0600498 debug("%s: Didn't get source clock value.\n", __func__);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000499 return -EINVAL;
500 }
501
Chin Liang See6ace1532014-06-10 01:26:52 -0500502 if (sclk == freq)
503 div = 0; /* bypass mode */
504 else
505 div = DIV_ROUND_UP(sclk, 2 * freq);
Jaehoon Chung757bff42012-10-15 19:10:29 +0000506
Sam Protsenko454fda92024-08-07 22:14:11 -0500507 ret = dwmci_update_div(host, div);
508 if (ret)
509 return ret;
Jaehoon Chung757bff42012-10-15 19:10:29 +0000510
511 host->clock = freq;
512
513 return 0;
514}
515
/*
 * Apply the current mmc settings (clock, bus width, DDR mode, signal
 * voltage) to the controller.
 *
 * Dual entry point: DM callback (udevice) or legacy (mmc) signature.
 * Returns 0 on success or an error from the platform clksel hook / the
 * vqmmc regulator.
 */
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	/* Toggle only the DDR bit, preserving the rest of UHS_REG */
	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	/* Platform-specific clock selection hook, if any */
	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	/* Switch vqmmc with the regulator disabled to avoid glitches */
	if (mmc->vqmmc_supply) {
		int ret;

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, false);
		if (ret)
			return ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}
581
Sam Protsenko1db6dd12024-08-07 22:14:10 -0500582static void dwmci_init_fifo(struct dwmci_host *host)
583{
584 if (!host->fifoth_val) {
585 u32 fifo_size;
586
587 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
588 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
589 host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
590 TX_WMARK(fifo_size / 2);
591 }
592
593 dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
594}
595
/*
 * One-time controller bring-up: board hook, power-on, full reset,
 * 400 kHz enumeration clock, interrupt masking, timeouts, FIFO
 * watermarks, and (in DMA mode) IDMAC interrupt enables.
 *
 * Returns 0 on success, -EIO if the controller reset never completes.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear all pending interrupts and mask everything */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	/* Maximum data/response timeout */
	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);
	dwmci_init_fifo(host);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	/* DMA mode needs the IDMAC status bits we poll on later */
	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}
630
#ifdef CONFIG_DM_MMC
/* DM probe: run the one-time controller init for this device's mmc */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model operations table (init happens in dwmci_probe()) */
const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
/* Legacy (non-DM) operations table */
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif
Pantelis Antoniouab769f22014-02-26 19:28:45 +0200651
Jaehoon Chunge5113c32016-09-23 19:13:16 +0900652void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
653 u32 max_clk, u32 min_clk)
Simon Glass5e6ff812016-05-14 14:03:07 -0600654{
Jaehoon Chunge5113c32016-09-23 19:13:16 +0900655 cfg->name = host->name;
Simon Glasse7881d82017-07-29 11:35:31 -0600656#ifndef CONFIG_DM_MMC
Simon Glass5e6ff812016-05-14 14:03:07 -0600657 cfg->ops = &dwmci_ops;
Simon Glass691272f2016-06-12 23:30:23 -0600658#endif
Simon Glass5e6ff812016-05-14 14:03:07 -0600659 cfg->f_min = min_clk;
660 cfg->f_max = max_clk;
661
662 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
663
Jaehoon Chunge5113c32016-09-23 19:13:16 +0900664 cfg->host_caps = host->caps;
Simon Glass5e6ff812016-05-14 14:03:07 -0600665
Jaehoon Chunge5113c32016-09-23 19:13:16 +0900666 if (host->buswidth == 8) {
Simon Glass5e6ff812016-05-14 14:03:07 -0600667 cfg->host_caps |= MMC_MODE_8BIT;
668 cfg->host_caps &= ~MMC_MODE_4BIT;
669 } else {
670 cfg->host_caps |= MMC_MODE_4BIT;
671 cfg->host_caps &= ~MMC_MODE_8BIT;
672 }
673 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
674
675 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
676}
677
#ifdef CONFIG_BLK
/* Blk/DM flow: bind the mmc device with the given config */
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
/*
 * Legacy flow: build the config and register the mmc device.
 * Returns 0 on success, -1 if mmc_create() fails.
 */
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif