// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 */

#include <config.h>
#include <common.h>
#include <dm.h>
#include <part.h>
#include <div64.h>
#include <linux/math64.h>
#include "mmc_private.h"

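/*
 * Erase one contiguous range of blocks: send the ERASE_GROUP_START/END
 * commands (or the SD WR_BLK equivalents) to mark the range, then issue
 * MMC_CMD_ERASE. On non-high-capacity cards the addresses are byte-based,
 * so start and end are scaled by the write block length.
 */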
static ulong mmc_erase_t(struct mmc *mmc, ulong start, lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	ulong end;
	int err, start_cmd, end_cmd;

	if (mmc->high_capacity) {
		end = start + blkcnt - 1;
	} else {
		end = (start + blkcnt - 1) * mmc->write_bl_len;
		start *= mmc->write_bl_len;
	}

	if (IS_SD(mmc)) {
		start_cmd = SD_CMD_ERASE_WR_BLK_START;
		end_cmd = SD_CMD_ERASE_WR_BLK_END;
	} else {
		start_cmd = MMC_CMD_ERASE_GROUP_START;
		end_cmd = MMC_CMD_ERASE_GROUP_END;
	}

	cmd.cmdidx = start_cmd;
	cmd.cmdarg = start;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		goto err_out;

	cmd.cmdidx = end_cmd;
	cmd.cmdarg = end;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		goto err_out;

	cmd.cmdidx = MMC_CMD_ERASE;
	cmd.cmdarg = MMC_ERASE_ARG;
	cmd.resp_type = MMC_RSP_R1b;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		goto err_out;

	return 0;

err_out:
	puts("mmc erase failed\n");
	return err;
}

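/*
 * Block-device erase entry point. The requested range is processed in
 * chunks of at most one erase group (or one SD allocation unit), after
 * warning if the range is not aligned to the erase group size. Returns
 * the number of blocks erased, or 0 if the card never returned to the
 * ready state.
 */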
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_berase(struct udevice *dev, lbaint_t start, lbaint_t blkcnt)
#else
ulong mmc_berase(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	int err = 0;
	u32 start_rem, blkcnt_rem;
	struct mmc *mmc = find_mmc_device(dev_num);
	lbaint_t blk = 0, blk_r = 0;
	int timeout = 1000;

	if (!mmc)
		return -1;

	err = blk_select_hwpart_devnum(IF_TYPE_MMC, dev_num,
				       block_dev->hwpart);
	if (err < 0)
		return -1;

	/*
	 * We want to see if the requested start or total block count are
	 * unaligned. We discard the whole numbers and only care about the
	 * remainder.
	 */
	err = div_u64_rem(start, mmc->erase_grp_size, &start_rem);
	err = div_u64_rem(blkcnt, mmc->erase_grp_size, &blkcnt_rem);
	if (start_rem || blkcnt_rem)
		printf("\n\nCaution! Your device's erase group size is 0x%x\n"
		       "The erase range will be changed to "
		       "0x" LBAF "~0x" LBAF "\n\n",
		       mmc->erase_grp_size, start & ~(mmc->erase_grp_size - 1),
		       ((start + blkcnt + mmc->erase_grp_size)
		       & ~(mmc->erase_grp_size - 1)) - 1);

	while (blk < blkcnt) {
		if (IS_SD(mmc) && mmc->ssr.au) {
			blk_r = ((blkcnt - blk) > mmc->ssr.au) ?
				mmc->ssr.au : (blkcnt - blk);
		} else {
			blk_r = ((blkcnt - blk) > mmc->erase_grp_size) ?
				mmc->erase_grp_size : (blkcnt - blk);
		}
		err = mmc_erase_t(mmc, start + blk, blk_r);
		if (err)
			break;

		blk += blk_r;

		/* Waiting for the ready status */
		if (mmc_poll_for_busy(mmc, timeout))
			return 0;
	}

	return blk;
}

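/*
 * Write up to one transfer's worth of blocks starting at 'start'. Uses
 * WRITE_SINGLE_BLOCK or WRITE_MULTIPLE_BLOCK as appropriate and, on
 * non-SPI hosts, terminates multi-block writes with STOP_TRANSMISSION.
 * Returns the number of blocks written, or 0 on error.
 */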
static ulong mmc_write_blocks(struct mmc *mmc, lbaint_t start,
		lbaint_t blkcnt, const void *src)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int timeout = 1000;

	if ((start + blkcnt) > mmc_get_blk_desc(mmc)->lba) {
		printf("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, mmc_get_blk_desc(mmc)->lba);
		return 0;
	}

	if (blkcnt == 0)
		return 0;
	else if (blkcnt == 1)
		cmd.cmdidx = MMC_CMD_WRITE_SINGLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_WRITE_MULTIPLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->write_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.src = src;
	data.blocks = blkcnt;
	data.blocksize = mmc->write_bl_len;
	data.flags = MMC_DATA_WRITE;

	if (mmc_send_cmd(mmc, &cmd, &data)) {
		printf("mmc write failed\n");
		return 0;
	}

	/* SPI multiblock writes terminate using a special
	 * token, not a STOP_TRANSMISSION request.
	 */
	if (!mmc_host_is_spi(mmc) && blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
			printf("mmc failed to send stop cmd\n");
			return 0;
		}
	}

	/* Waiting for the ready status */
	if (mmc_poll_for_busy(mmc, timeout))
		return 0;

	return blkcnt;
}

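/*
 * Block-device write entry point. Selects the hardware partition, sets the
 * block length, then loops over mmc_write_blocks() in chunks no larger than
 * the host's b_max limit. Returns blkcnt on success, 0 on failure.
 */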
#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bwrite(struct udevice *dev, lbaint_t start, lbaint_t blkcnt,
		 const void *src)
#else
ulong mmc_bwrite(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		 const void *src)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_platdata(dev);
#endif
	int dev_num = block_dev->devnum;
	lbaint_t cur, blocks_todo = blkcnt;
	int err;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	err = blk_select_hwpart_devnum(IF_TYPE_MMC, dev_num, block_dev->hwpart);
	if (err < 0)
		return 0;

	if (mmc_set_blocklen(mmc, mmc->write_bl_len))
		return 0;

	do {
		cur = (blocks_todo > mmc->cfg->b_max) ?
			mmc->cfg->b_max : blocks_todo;
		if (mmc_write_blocks(mmc, start, cur, src) != cur)
			return 0;
		blocks_todo -= cur;
		start += cur;
		src += cur * mmc->write_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}