// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 */

#include <config.h>
#include <common.h>
#include <blk.h>
#include <dm.h>
#include <part.h>
#include <div64.h>
#include <linux/math64.h>
#include "mmc_private.h"

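/*
 * mmc_erase_t() - erase one contiguous range of blocks on the card
 *
 * The start and end of the range are programmed with the SD or MMC
 * "erase group start/end" commands before the ERASE command itself is
 * issued. On error a message is printed and the command error code is
 * returned.
 */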
static ulong mmc_erase_t(struct mmc *mmc, ulong start, lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	ulong end;
	int err, start_cmd, end_cmd;

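	/*
	 * High-capacity cards are block addressed; standard-capacity cards
	 * expect byte addresses, so scale by the write block length.
	 */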
	if (mmc->high_capacity) {
		end = start + blkcnt - 1;
	} else {
		end = (start + blkcnt - 1) * mmc->write_bl_len;
		start *= mmc->write_bl_len;
	}

	if (IS_SD(mmc)) {
		start_cmd = SD_CMD_ERASE_WR_BLK_START;
		end_cmd = SD_CMD_ERASE_WR_BLK_END;
	} else {
		start_cmd = MMC_CMD_ERASE_GROUP_START;
		end_cmd = MMC_CMD_ERASE_GROUP_END;
	}

	cmd.cmdidx = start_cmd;
	cmd.cmdarg = start;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		goto err_out;

	cmd.cmdidx = end_cmd;
	cmd.cmdarg = end;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		goto err_out;

	cmd.cmdidx = MMC_CMD_ERASE;
	cmd.cmdarg = MMC_ERASE_ARG;
	cmd.resp_type = MMC_RSP_R1b;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		goto err_out;

	return 0;

err_out:
	puts("mmc erase failed\n");
	return err;
}

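/*
 * mmc_berase() - block-device erase entry point
 *
 * Warns when the requested range is not aligned to the card's erase group
 * size (the card rounds the range out to whole erase groups) and erases in
 * chunks of at most one SD allocation unit or one eMMC erase group,
 * polling for the card to become ready after each chunk. Returns the
 * number of blocks for which erase commands completed.
 */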
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_berase(struct udevice *dev, lbaint_t start, lbaint_t blkcnt)
#else
ulong mmc_berase(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err = 0;
	u32 start_rem, blkcnt_rem;
	struct mmc *mmc = find_mmc_device(dev_num);
	lbaint_t blk = 0, blk_r = 0;
	int timeout_ms = 1000;

	if (!mmc)
		return -1;

	err = blk_select_hwpart_devnum(IF_TYPE_MMC, dev_num,
				       block_dev->hwpart);
	if (err < 0)
		return -1;

	/*
	 * We want to see if the requested start or total block count are
	 * unaligned. We discard the whole numbers and only care about the
	 * remainder.
	 */
	err = div_u64_rem(start, mmc->erase_grp_size, &start_rem);
	err = div_u64_rem(blkcnt, mmc->erase_grp_size, &blkcnt_rem);
	if (start_rem || blkcnt_rem)
		printf("\n\nCaution! Your device's erase group is 0x%x\n"
		       "The erase range will be changed to "
		       "0x" LBAF "~0x" LBAF "\n\n",
		       mmc->erase_grp_size, start & ~(mmc->erase_grp_size - 1),
		       ((start + blkcnt + mmc->erase_grp_size)
		       & ~(mmc->erase_grp_size - 1)) - 1);

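	/*
	 * Erase in chunks: at most one allocation unit at a time for SD
	 * (when the AU size is known), otherwise one erase group at a time.
	 */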
	while (blk < blkcnt) {
		if (IS_SD(mmc) && mmc->ssr.au) {
			blk_r = ((blkcnt - blk) > mmc->ssr.au) ?
				mmc->ssr.au : (blkcnt - blk);
		} else {
			blk_r = ((blkcnt - blk) > mmc->erase_grp_size) ?
				mmc->erase_grp_size : (blkcnt - blk);
		}
		err = mmc_erase_t(mmc, start + blk, blk_r);
		if (err)
			break;

		blk += blk_r;

		/* Wait for the erase operation to complete */
		if (mmc_poll_for_busy(mmc, timeout_ms))
			return 0;
	}

	return blk;
}

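/*
 * mmc_write_blocks() - write a single chunk of blocks to the card
 *
 * Issues one WRITE_SINGLE_BLOCK or WRITE_MULTIPLE_BLOCK command for the
 * whole chunk and then waits for the card to leave its busy state.
 * Returns the number of blocks written, or 0 on error.
 */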
static ulong mmc_write_blocks(struct mmc *mmc, lbaint_t start,
			      lbaint_t blkcnt, const void *src)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int timeout_ms = 1000;

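	/* Refuse to write past the end of the selected device or partition */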
	if ((start + blkcnt) > mmc_get_blk_desc(mmc)->lba) {
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, mmc_get_blk_desc(mmc)->lba);
		return 0;
	}

	if (blkcnt == 0)
		return 0;
	else if (blkcnt == 1)
		cmd.cmdidx = MMC_CMD_WRITE_SINGLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_WRITE_MULTIPLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->write_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.src = src;
	data.blocks = blkcnt;
	data.blocksize = mmc->write_bl_len;
	data.flags = MMC_DATA_WRITE;

	if (mmc_send_cmd(mmc, &cmd, &data)) {
		printf("mmc write failed\n");
		return 0;
	}

	/*
	 * SPI multiblock writes terminate using a special token,
	 * not a STOP_TRANSMISSION request.
	 */
	if (!mmc_host_is_spi(mmc) && blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
			printf("mmc failed to send stop cmd\n");
			return 0;
		}
	}

	/* Wait for the write operation to complete */
	if (mmc_poll_for_busy(mmc, timeout_ms))
		return 0;

	return blkcnt;
}

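/*
 * mmc_bwrite() - block-device write entry point
 *
 * Selects the requested hardware partition, sets the write block length
 * and writes the data in chunks of at most cfg->b_max blocks per
 * transfer. Returns blkcnt on success, 0 on failure.
 */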
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bwrite(struct udevice *dev, lbaint_t start, lbaint_t blkcnt,
		 const void *src)
#else
ulong mmc_bwrite(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		 const void *src)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	lbaint_t cur, blocks_todo = blkcnt;
	int err;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	err = blk_select_hwpart_devnum(IF_TYPE_MMC, dev_num, block_dev->hwpart);
	if (err < 0)
		return 0;

	if (mmc_set_blocklen(mmc, mmc->write_bl_len))
		return 0;

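	/* Split the request into transfers of at most cfg->b_max blocks */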
	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_write_blocks(mmc, start, cur, src) != cur)
			return 0;
		blocks_todo -= cur;
		start += cur;
		src += cur * mmc->write_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}