// SPDX-License-Identifier: GPL-2.0+
#include "internal.h"
#include "decompress.h"

static int erofs_map_blocks_flatmode(struct erofs_inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = inode;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->u.i_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		map->m_pa = iloc(vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err("inline data cross block boundary @ nid %" PRIu64,
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err("internal error @ nid: %" PRIu64 " (size %llu), m_la 0x%" PRIx64,
			  vi->nid, (unsigned long long)inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

	map->m_llen = map->m_plen;
err_out:
	return err;
}

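/*
 * Map a logical extent for uncompressed inodes. Chunk-based inodes look up
 * the block map or chunk indexes stored after the inode metadata; all other
 * layouts are handled by erofs_map_blocks_flatmode().
 */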
int erofs_map_blocks(struct erofs_inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = inode;
	struct erofs_inode_chunk_index *idx;
	u8 buf[EROFS_BLKSIZ];
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
		return erofs_map_blocks_flatmode(inode, map, flags);

	if (vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->u.chunkbits;
	pos = roundup(iloc(vi->nid) + vi->inode_isize +
		      vi->xattr_isize, unit) + unit * chunknr;

	err = erofs_blk_read(buf, erofs_blknr(pos), 1);
	if (err < 0)
		return -EIO;

	map->m_la = chunknr << vi->u.chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->u.chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = (void *)buf + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out;
	}
	/* parse chunk indexes */
	idx = (void *)buf + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			sbi.device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out:
	map->m_llen = map->m_plen;
	return err;
}

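/*
 * Translate a physical address into a (device id, offset) pair for
 * multi-device images. Without an explicit device id, scan the extra
 * devices and rebase m_pa into the matching device's address space.
 */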
int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map)
{
	struct erofs_device_info *dif;
	int id;

	if (map->m_deviceid) {
		if (sbi->extra_devices < map->m_deviceid)
			return -ENODEV;
	} else if (sbi->extra_devices) {
		for (id = 0; id < sbi->extra_devices; ++id) {
			erofs_off_t startoff, length;

			dif = sbi->devs + id;
			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				break;
			}
		}
	}
	return 0;
}

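/*
 * Read uncompressed data by walking extents forward from @offset, copying
 * mapped ranges from the backing device and zero-filling unmapped ones.
 */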
static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
			       erofs_off_t size, erofs_off_t offset)
{
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	struct erofs_map_dev mdev;
	int ret;
	erofs_off_t ptr = offset;

	while (ptr < offset + size) {
		char *const estart = buffer + ptr - offset;
		erofs_off_t eend;

		map.m_la = ptr;
		ret = erofs_map_blocks(inode, &map, 0);
		if (ret)
			return ret;

		DBG_BUGON(map.m_plen != map.m_llen);

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(&sbi, &mdev);
		if (ret)
			return ret;

		/* trim extent */
		eend = min(offset + size, map.m_la + map.m_llen);
		DBG_BUGON(ptr < map.m_la);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			if (!map.m_llen) {
				/* reached EOF */
				memset(estart, 0, offset + size - ptr);
				ptr = offset + size;
				continue;
			}
			memset(estart, 0, eend - ptr);
			ptr = eend;
			continue;
		}

		if (ptr > map.m_la) {
			mdev.m_pa += ptr - map.m_la;
			map.m_la = ptr;
		}

		ret = erofs_dev_read(mdev.m_deviceid, estart, mdev.m_pa,
				     eend - map.m_la);
		if (ret < 0)
			return -EIO;
		ptr = eend;
	}
	return 0;
}

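/*
 * Read compressed data by walking extents backward from the end of the
 * requested range, reading each physical cluster into a temporary buffer
 * and decompressing it directly into the destination.
 */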
static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
			     erofs_off_t size, erofs_off_t offset)
{
	erofs_off_t end, length, skip;
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	struct erofs_map_dev mdev;
	bool partial;
	unsigned int bufsize = 0;
	char *raw = NULL;
	int ret = 0;

	end = offset + size;
	while (end > offset) {
		map.m_la = end - 1;

		ret = z_erofs_map_blocks_iter(inode, &map, 0);
		if (ret)
			break;

		/* no device id here, thus it will always succeed */
		mdev = (struct erofs_map_dev) {
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(&sbi, &mdev);
		if (ret) {
			DBG_BUGON(1);
			break;
		}

		/*
		 * Trim to the needed size if the returned extent is larger
		 * than requested, and set the partial flag as well.
		 */
		if (end < map.m_la + map.m_llen) {
			length = end - map.m_la;
			partial = true;
		} else {
			DBG_BUGON(end != map.m_la + map.m_llen);
			length = map.m_llen;
			partial = !(map.m_flags & EROFS_MAP_FULL_MAPPED);
		}

		if (map.m_la < offset) {
			skip = offset - map.m_la;
			end = offset;
		} else {
			skip = 0;
			end = map.m_la;
		}

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			memset(buffer + end - offset, 0, length);
			end = map.m_la;
			continue;
		}

		if (map.m_plen > bufsize) {
			char *newraw;

			/* use a temporary so the old buffer isn't leaked on failure */
			bufsize = map.m_plen;
			newraw = realloc(raw, bufsize);
			if (!newraw) {
				ret = -ENOMEM;
				break;
			}
			raw = newraw;
		}
		ret = erofs_dev_read(mdev.m_deviceid, raw, mdev.m_pa, map.m_plen);
		if (ret < 0)
			break;

		ret = z_erofs_decompress(&(struct z_erofs_decompress_req) {
					.in = raw,
					.out = buffer + end - offset,
					.decodedskip = skip,
					.inputsize = map.m_plen,
					.decodedlength = length,
					.alg = map.m_algorithmformat,
					.partial_decoding = partial
					 });
		if (ret < 0)
			break;
	}
	if (raw)
		free(raw);
	return ret < 0 ? ret : 0;
}

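/*
 * Read @count bytes at @offset from an inode, dispatching on the inode's
 * data layout to the raw or compressed read path.
 */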
int erofs_pread(struct erofs_inode *inode, char *buf,
		erofs_off_t count, erofs_off_t offset)
{
	switch (inode->datalayout) {
	case EROFS_INODE_FLAT_PLAIN:
	case EROFS_INODE_FLAT_INLINE:
	case EROFS_INODE_CHUNK_BASED:
		return erofs_read_raw_data(inode, buf, count, offset);
	case EROFS_INODE_FLAT_COMPRESSION_LEGACY:
	case EROFS_INODE_FLAT_COMPRESSION:
		return z_erofs_read_data(inode, buf, count, offset);
	default:
		break;
	}
	return -EINVAL;
}