// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 */

#include <bouncebuf.h>
#include <cpu_func.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <power/regulator.h>

#define PAGE_SIZE 4096

struct dwmci_idmac {
	u32 flags;
	u32 cnt;
	u32 addr;
	u32 next_addr;
} __aligned(ARCH_DMA_MINALIGN);

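/*
 * Write the given reset bits to CTRL and poll until the controller clears
 * DWMCI_RESET_ALL. Returns 1 on success, 0 if the reset did not complete
 * within the polling budget.
 */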
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

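/* Fill one IDMAC descriptor and link it to the next descriptor in the chain. */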
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

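/*
 * Build the IDMAC descriptor chain for a transfer (up to 8 blocks per
 * descriptor), flush it to memory and program the DMA, block size and
 * byte count registers.
 */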
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else {
			cnt = data->blocksize * 8;
		}

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		cur_idmac++;
		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

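/*
 * Poll STATUS until the given FIFO flag is deasserted; the last STATUS value
 * read is returned through *len. Returns -ETIMEDOUT if the flag does not
 * clear in time.
 */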
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

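/*
 * Estimate a data transfer timeout in milliseconds from the transfer size,
 * bus clock, bus width and DDR mode, with a 1000 ms lower bound.
 */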
static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout /= mmc->ddr_mode ? 2 : 1;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 1000) ? 1000 : timeout;

	return timeout;
}

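/*
 * PIO (fifo_mode) path: drain the RX FIFO or fill the TX FIFO one word at a
 * time, according to the pending interrupts in @mask. Does nothing when the
 * host uses the internal DMA controller.
 */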
static int dwmci_data_transfer_fifo(struct dwmci_host *host,
				    struct mmc_data *data, u32 mask)
{
	const u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
				 RX_WMARK_SHIFT) + 1) * 2;
	const u32 int_rx = mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO);
	const u32 int_tx = mask & DWMCI_INTMSK_TXDR;
	int ret = 0;
	u32 len = 0, size, i;
	u32 *buf;

	size = (data->blocksize * data->blocks) / 4;
	if (!host->fifo_mode || !size)
		return 0;

	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	if (data->flags == MMC_DATA_READ && int_rx) {
		dwmci_writel(host, DWMCI_RINTSTS, int_rx);
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_EMPTY, &len);
			if (ret < 0)
				break;

			len = (len >> DWMCI_FIFO_SHIFT) & DWMCI_FIFO_MASK;
			len = min(size, len);
			for (i = 0; i < len; i++)
				*buf++ = dwmci_readl(host, DWMCI_DATA);
			size = size > len ? (size - len) : 0;
		}
	} else if (data->flags == MMC_DATA_WRITE && int_tx) {
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_FULL, &len);
			if (ret < 0)
				break;

			len = fifo_depth - ((len >> DWMCI_FIFO_SHIFT) &
					    DWMCI_FIFO_MASK);
			len = min(size, len);
			for (i = 0; i < len; i++)
				dwmci_writel(host, DWMCI_DATA, *buf++);
			size = size > len ? (size - len) : 0;
		}
		dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_TXDR);
	}

	return ret;
}

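/*
 * Wait for the data phase to finish: poll RINTSTS for errors or DTO and, in
 * fifo_mode, move data through the FIFO while waiting. The timeout is scaled
 * to the transfer size.
 */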
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size;
	ulong start = get_timer(0);

	size = data->blocksize * data->blocks;
	timeout = dwmci_get_timeout(mmc, size);

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		ret = dwmci_data_transfer_fifo(host, data, mask);

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

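/*
 * Wait for the IDMAC receive/transmit interrupt, then clear the IDMAC status,
 * disable DMA in CTRL and release the bounce buffer.
 */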
static int dwmci_dma_transfer(struct dwmci_host *host, uint flags,
			      struct bounce_buffer *bbstate)
{
	int ret;
	u32 mask, ctrl;

	if (flags == MMC_DATA_READ)
		mask = DWMCI_IDINTEN_RI;
	else
		mask = DWMCI_IDINTEN_TI;

	ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
				mask, true, 1000, false);
	if (ret)
		debug("%s: DWMCI_IDINTEN mask 0x%x timeout\n", __func__, mask);

	/* Clear interrupts */
	dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl &= ~DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	bounce_buffer_stop(bbstate);
	return ret;
}

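/* Build the command flags describing the data phase (data expected, read/write). */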
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

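/* Wait up to 500 ms for the data lines to become idle before sending a command. */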
static void dwmci_wait_while_busy(struct dwmci_host *host, struct mmc_cmd *cmd)
{
	unsigned int timeout = 500; /* msec */
	ulong start;

	start = get_timer(0);
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy, continue anyway\n",
			      __func__);
			break;
		}
	}
}

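/*
 * Send a command (and optional data transfer) to the card. Data is moved
 * either through the FIFO (fifo_mode) or via the internal DMA controller
 * using a bounce buffer.
 */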
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	u32 retry = 100000;
	u32 mask;
	struct bounce_buffer bbstate;

	dwmci_wait_while_busy(host, cmd);
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -EBUSY;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);
		if (!host->fifo_mode)
			ret = dwmci_dma_transfer(host, data->flags, &bbstate);
	}

	udelay(100);

	return ret;
}

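/*
 * Enable or disable the card clock and tell the CIU to pick up the new
 * setting with an update-clocks-only command.
 */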
static int dwmci_control_clken(struct dwmci_host *host, bool on)
{
	const u32 val = on ? DWMCI_CLKEN_ENABLE | DWMCI_CLKEN_LOW_PWR : 0;
	const u32 cmd_only_clk = DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_UPD_CLK;
	int timeout = 10000;
	u32 status;

	dwmci_writel(host, DWMCI_CLKENA, val);

	/* Inform CIU */
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_START | cmd_only_clk);
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	return 0;
}

/*
 * Update the clock divider.
 *
 * To prevent a clock glitch keep the clock stopped during the update of
 * clock divider and clock source.
 */
static int dwmci_update_div(struct dwmci_host *host, u32 div)
{
	int ret;

	/* Disable clock */
	ret = dwmci_control_clken(host, false);
	if (ret)
		return ret;

	/* Set clock to desired speed */
	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	/* Enable clock */
	return dwmci_control_clken(host, true);
}

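/*
 * Set the card clock to @freq: derive the divider from the source clock
 * (bypass when they match) and program it with the clock gated.
 */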
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div;
	unsigned long sclk;
	int ret;

	if ((freq == host->clock) || (freq == 0))
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is source clock value.
	 * host->bus_hz should be set by user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0; /* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	ret = dwmci_update_div(host, div);
	if (ret)
		return ret;

	host->clock = freq;

	return 0;
}

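/* Apply the requested bus width, clock and (optionally) signalling voltage. */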
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, false);
		if (ret)
			return ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}

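/*
 * Program the FIFO thresholds; if the board did not provide fifoth_val,
 * derive default watermarks from the FIFO depth reported by the controller.
 */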
static void dwmci_init_fifo(struct dwmci_host *host)
{
	if (!host->fifoth_val) {
		u32 fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}

	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
}

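/* Reset the controller, mask interrupts and set up clock, FIFO and DMA state. */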
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);
	dwmci_init_fifo(host);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif

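/*
 * Fill in the mmc_config from the host parameters (clock range, bus width,
 * supported voltages and modes).
 */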
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif