// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Addiva Elektronik
 * Author: Tobias Waldekranz <tobias@waldekranz.com>
 */

#include <common.h>
#include <blk.h>
#include <blkmap.h>
#include <dm.h>
#include <malloc.h>
#include <mapmem.h>
#include <part.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/root.h>

struct blkmap;

/**
 * struct blkmap_slice - Region mapped to a blkmap
 *
 * Common data for a region mapped to a blkmap, specialized by each
 * map type.
 *
 * @node: List node used to associate this slice with a blkmap
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks covered by this mapping
 */
struct blkmap_slice {
	struct list_head node;

	lbaint_t blknr;
	lbaint_t blkcnt;

	/**
	 * @read: - Read from slice
	 *
	 * @read.bm: Blkmap to which this slice belongs
	 * @read.bms: This slice
	 * @read.blknr: Start block number to read from
	 * @read.blkcnt: Number of blocks to read
	 * @read.buffer: Buffer to store read data to
	 */
	ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
		      lbaint_t blknr, lbaint_t blkcnt, void *buffer);

	/**
	 * @write: - Write to slice
	 *
	 * @write.bm: Blkmap to which this slice belongs
	 * @write.bms: This slice
	 * @write.blknr: Start block number to write to
	 * @write.blkcnt: Number of blocks to write
	 * @write.buffer: Data to be written
	 */
	ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
		       lbaint_t blknr, lbaint_t blkcnt, const void *buffer);

	/**
	 * @destroy: - Tear down slice
	 *
	 * @destroy.bm: Blkmap to which this slice belongs
	 * @destroy.bms: This slice
	 */
	void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
};

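/**
 * blkmap_slice_contains() - Check if a slice contains a block number
 *
 * @bms: Slice to check
 * @blknr: Block number to look for
 *
 * Returns: true if @blknr falls inside @bms, false otherwise
 */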
static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
{
	return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
}

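/**
 * blkmap_slice_available() - Check that a new slice does not overlap
 *
 * @bm: Blkmap to check against
 * @new: Candidate slice
 *
 * Returns: true if @new overlaps none of the existing slices of @bm,
 * false otherwise
 */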
static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blkmap_slice *bms;
	lbaint_t first, last;

	first = new->blknr;
	last = new->blknr + new->blkcnt - 1;

	list_for_each_entry(bms, &bm->slices, node) {
		if (blkmap_slice_contains(bms, first) ||
		    blkmap_slice_contains(bms, last) ||
		    blkmap_slice_contains(new, bms->blknr) ||
		    blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
			return false;
	}

	return true;
}

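/**
 * blkmap_slice_add() - Insert a slice, keeping the list sorted by block number
 *
 * @bm: Blkmap to add the slice to
 * @new: Slice to insert
 *
 * Returns: 0 on success, -EBUSY if @new overlaps an existing slice
 */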
static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	struct list_head *insert = &bm->slices;
	struct blkmap_slice *bms;

	if (!blkmap_slice_available(bm, new))
		return -EBUSY;

	list_for_each_entry(bms, &bm->slices, node) {
		if (bms->blknr < new->blknr)
			continue;

		insert = &bms->node;
		break;
	}

	list_add_tail(&new->node, insert);

	/* Disk might have grown, update the size */
	bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
	bd->lba = bms->blknr + bms->blkcnt;
	return 0;
}

/**
 * struct blkmap_linear - Linear mapping to other block device
 *
 * @slice: Common map data
 * @blk: Target block device of this mapping
 * @blknr: Start block number of the target device
 */
struct blkmap_linear {
	struct blkmap_slice slice;

	struct udevice *blk;
	lbaint_t blknr;
};

static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
				lbaint_t blknr, lbaint_t blkcnt, void *buffer)
{
	struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);

	return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
}

static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
				 lbaint_t blknr, lbaint_t blkcnt,
				 const void *buffer)
{
	struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);

	return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
}

int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		      struct udevice *lblk, lbaint_t lblknr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_linear *linear;
	struct blk_desc *bd, *lbd;
	int err;

	bd = dev_get_uclass_plat(bm->blk);
	lbd = dev_get_uclass_plat(lblk);
	if (lbd->blksz != bd->blksz) {
		/* update to match the mapped device */
		bd->blksz = lbd->blksz;
		bd->log2blksz = LOG2(bd->blksz);
	}

	linear = malloc(sizeof(*linear));
	if (!linear)
		return -ENOMEM;

	*linear = (struct blkmap_linear) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,

			.read = blkmap_linear_read,
			.write = blkmap_linear_write,
		},

		.blk = lblk,
		.blknr = lblknr,
	};

	err = blkmap_slice_add(bm, &linear->slice);
	if (err)
		free(linear);

	return err;
}

/**
 * struct blkmap_mem - Memory mapping
 *
 * @slice: Common map data
 * @addr: Target memory region of this mapping
 * @remapped: True if @addr is backed by a physical to virtual memory
 * mapping that must be torn down at the end of this mapping's
 * lifetime.
 */
struct blkmap_mem {
	struct blkmap_slice slice;
	void *addr;
	bool remapped;
};

static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
			     lbaint_t blknr, lbaint_t blkcnt, void *buffer)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	char *src;

	src = bmm->addr + (blknr << bd->log2blksz);
	memcpy(buffer, src, blkcnt << bd->log2blksz);
	return blkcnt;
}

static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
			      lbaint_t blknr, lbaint_t blkcnt,
			      const void *buffer)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	char *dst;

	dst = bmm->addr + (blknr << bd->log2blksz);
	memcpy(dst, buffer, blkcnt << bd->log2blksz);
	return blkcnt;
}

static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);

	if (bmm->remapped)
		unmap_sysmem(bmm->addr);
}

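/**
 * __blkmap_map_mem() - Add a memory backed slice to a blkmap
 *
 * @dev: Blkmap device
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks to map
 * @addr: Virtual address of the backing memory
 * @remapped: True if @addr comes from map_sysmem() and must be unmapped
 * when the slice is destroyed
 *
 * Returns: 0 on success, negative error code on failure
 */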
int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		     void *addr, bool remapped)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_mem *bmm;
	int err;

	bmm = malloc(sizeof(*bmm));
	if (!bmm)
		return -ENOMEM;

	*bmm = (struct blkmap_mem) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,

			.read = blkmap_mem_read,
			.write = blkmap_mem_write,
			.destroy = blkmap_mem_destroy,
		},

		.addr = addr,
		.remapped = remapped,
	};

	err = blkmap_slice_add(bm, &bmm->slice);
	if (err)
		free(bmm);

	return err;
}

int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		   void *addr)
{
	return __blkmap_map_mem(dev, blknr, blkcnt, addr, false);
}

int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		    phys_addr_t paddr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	void *addr;
	int err;

	addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
	if (!addr)
		return -ENOMEM;

	err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true);
	if (err)
		unmap_sysmem(addr);

	return err;
}

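/**
 * blkmap_blk_read_slice() - Read the part of a request served by one slice
 *
 * @bm: Blkmap to read from
 * @bms: Slice containing the start of the request
 * @blknr: Absolute start block number of the request
 * @blkcnt: Number of blocks still to read
 * @buffer: Buffer to store read data to
 *
 * The write path below mirrors this helper.
 *
 * Returns: Number of blocks read from @bms
 */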
static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
				   lbaint_t blknr, lbaint_t blkcnt,
				   void *buffer)
{
	lbaint_t nr, cnt;

	nr = blknr - bms->blknr;
	/* Clamp to the blocks remaining in this slice from @blknr on */
	cnt = (blkcnt < bms->blkcnt - nr) ? blkcnt : (bms->blkcnt - nr);
	return bms->read(bm, bms, nr, cnt, buffer);
}

static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
			     lbaint_t blkcnt, void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}

static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
				    lbaint_t blknr, lbaint_t blkcnt,
				    const void *buffer)
{
	lbaint_t nr, cnt;

	nr = blknr - bms->blknr;
	/* Clamp to the blocks remaining in this slice from @blknr on */
	cnt = (blkcnt < bms->blkcnt - nr) ? blkcnt : (bms->blkcnt - nr);
	return bms->write(bm, bms, nr, cnt, buffer);
}

static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
			      lbaint_t blkcnt, const void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}

static const struct blk_ops blkmap_blk_ops = {
	.read	= blkmap_blk_read,
	.write	= blkmap_blk_write,
};

U_BOOT_DRIVER(blkmap_blk) = {
	.name		= "blkmap_blk",
	.id		= UCLASS_BLK,
	.ops		= &blkmap_blk_ops,
};

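/**
 * blkmap_dev_bind() - Create the backing block device of a new blkmap
 *
 * @dev: Blkmap device that was just bound
 *
 * Returns: 0 on success, negative error code on failure
 */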
static int blkmap_dev_bind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd;
	int err;

	err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
				 dev_seq(dev), DEFAULT_BLKSZ, 0, &bm->blk);
	if (err)
		return log_msg_ret("blk", err);

	INIT_LIST_HEAD(&bm->slices);

	bd = dev_get_uclass_plat(bm->blk);
	snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
	snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
	snprintf(bd->revision, BLK_REV_SIZE, "1.0");

	/* EFI core isn't keen on zero-sized disks, so we lie. This is
	 * updated with the correct size once the user adds a
	 * mapping.
	 */
	bd->lba = 1;

	return 0;
}

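/**
 * blkmap_dev_unbind() - Free all slices and remove the backing block device
 *
 * @dev: Blkmap device that is being unbound
 *
 * Returns: 0 on success, negative error code on failure
 */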
static int blkmap_dev_unbind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_slice *bms, *tmp;
	int err;

	list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
		list_del(&bms->node);
		free(bms);
	}

	err = device_remove(bm->blk, DM_REMOVE_NORMAL);
	if (err)
		return err;

	return device_unbind(bm->blk);
}

U_BOOT_DRIVER(blkmap_root) = {
	.name		= "blkmap_dev",
	.id		= UCLASS_BLKMAP,
	.bind		= blkmap_dev_bind,
	.unbind		= blkmap_dev_unbind,
	.plat_auto	= sizeof(struct blkmap),
};

struct udevice *blkmap_from_label(const char *label)
{
	struct udevice *dev;
	struct uclass *uc;
	struct blkmap *bm;

	uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
		bm = dev_get_plat(dev);
		if (bm->label && !strcmp(label, bm->label))
			return dev;
	}

	return NULL;
}

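/*
 * Typical use of this API, as a rough sketch (the label, block count and
 * physical address below are made up for illustration, and error handling
 * is omitted):
 *
 *	struct udevice *dev;
 *
 *	blkmap_create("ramdisk", &dev);
 *	blkmap_map_pmem(dev, 0, 0x8000, 0x40000000);
 *	...
 *	blkmap_destroy(dev);
 */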
int blkmap_create(const char *label, struct udevice **devp)
{
	char *hname, *hlabel;
	struct udevice *dev;
	struct blkmap *bm;
	size_t namelen;
	int err;

	dev = blkmap_from_label(label);
	if (dev) {
		err = -EBUSY;
		goto err;
	}

	hlabel = strdup(label);
	if (!hlabel) {
		err = -ENOMEM;
		goto err;
	}

	namelen = strlen("blkmap-") + strlen(label) + 1;
	hname = malloc(namelen);
	if (!hname) {
		err = -ENOMEM;
		goto err_free_hlabel;
	}

	strlcpy(hname, "blkmap-", namelen);
	strlcat(hname, label, namelen);

	err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
	if (err)
		goto err_free_hname;

	device_set_name_alloced(dev);
	bm = dev_get_plat(dev);
	bm->label = hlabel;

	if (devp)
		*devp = dev;

	return 0;

err_free_hname:
	free(hname);
err_free_hlabel:
	free(hlabel);
err:
	return err;
}

int blkmap_destroy(struct udevice *dev)
{
	int err;

	err = device_remove(dev, DM_REMOVE_NORMAL);
	if (err)
		return err;

	return device_unbind(dev);
}

UCLASS_DRIVER(blkmap) = {
	.id		= UCLASS_BLKMAP,
	.name		= "blkmap",
};