// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Addiva Elektronik
 * Author: Tobias Waldekranz <tobias@waldekranz.com>
 */

#include <common.h>
#include <blk.h>
#include <blkmap.h>
#include <dm.h>
#include <malloc.h>
#include <mapmem.h>
#include <part.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/root.h>

struct blkmap;

/**
 * struct blkmap_slice - Region mapped to a blkmap
 *
 * Common data for a region mapped to a blkmap, specialized by each
 * map type.
 *
 * @node: List node used to associate this slice with a blkmap
 * @blknr: Start block number of the mapping
 * @blkcnt: Number of blocks covered by this mapping
 */
struct blkmap_slice {
	struct list_head node;

	lbaint_t blknr;
	lbaint_t blkcnt;

	/**
	 * @read: - Read from slice
	 *
	 * @read.bm: Blkmap to which this slice belongs
	 * @read.bms: This slice
	 * @read.blknr: Start block number to read from
	 * @read.blkcnt: Number of blocks to read
	 * @read.buffer: Buffer to store read data to
	 */
	ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
		      lbaint_t blknr, lbaint_t blkcnt, void *buffer);

	/**
	 * @write: - Write to slice
	 *
	 * @write.bm: Blkmap to which this slice belongs
	 * @write.bms: This slice
	 * @write.blknr: Start block number to write to
	 * @write.blkcnt: Number of blocks to write
	 * @write.buffer: Data to be written
	 */
	ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
		       lbaint_t blknr, lbaint_t blkcnt, const void *buffer);

	/**
	 * @destroy: - Tear down slice
	 *
	 * @destroy.bm: Blkmap to which this slice belongs
	 * @destroy.bms: This slice
	 */
	void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
};

/**
 * struct blkmap - Block map
 *
 * Data associated with a blkmap.
 *
 * @label: Human readable name of this blkmap
 * @blk: Underlying block device
 * @slices: List of slices associated with this blkmap
 */
struct blkmap {
	char *label;
	struct udevice *blk;
	struct list_head slices;
};

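/* Return true if block @blknr falls within the slice @bms */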
static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
{
	return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
}

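/*
 * Return true if the new slice does not overlap any slice already
 * added to the blkmap.
 */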
static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blkmap_slice *bms;
	lbaint_t first, last;

	first = new->blknr;
	last = new->blknr + new->blkcnt - 1;

	list_for_each_entry(bms, &bm->slices, node) {
		if (blkmap_slice_contains(bms, first) ||
		    blkmap_slice_contains(bms, last) ||
		    blkmap_slice_contains(new, bms->blknr) ||
		    blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
			return false;
	}

	return true;
}

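/*
 * Insert a new slice into the blkmap, keeping the slice list sorted
 * by start block. Overlapping mappings are rejected with -EBUSY, and
 * the block device's capacity is updated to cover the last slice.
 */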
static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
{
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	struct list_head *insert = &bm->slices;
	struct blkmap_slice *bms;

	if (!blkmap_slice_available(bm, new))
		return -EBUSY;

	list_for_each_entry(bms, &bm->slices, node) {
		if (bms->blknr < new->blknr)
			continue;

		insert = &bms->node;
		break;
	}

	list_add_tail(&new->node, insert);

	/* Disk might have grown, update the size */
	bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
	bd->lba = bms->blknr + bms->blkcnt;
	return 0;
}

/**
 * struct blkmap_linear - Linear mapping to other block device
 *
 * @slice: Common map data
 * @blk: Target block device of this mapping
 * @blknr: Start block number of the target device
 */
struct blkmap_linear {
	struct blkmap_slice slice;

	struct udevice *blk;
	lbaint_t blknr;
};

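/*
 * Linear slices forward reads and writes to the target block device,
 * offset by the slice's start block on that device.
 */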
static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
				lbaint_t blknr, lbaint_t blkcnt, void *buffer)
{
	struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);

	return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
}

static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
				 lbaint_t blknr, lbaint_t blkcnt,
				 const void *buffer)
{
	struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);

	return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
}

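/*
 * Map a slice of another block device into the blkmap. The target
 * device must use the same block size as the blkmap.
 */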
int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		      struct udevice *lblk, lbaint_t lblknr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_linear *linear;
	struct blk_desc *bd, *lbd;
	int err;

	bd = dev_get_uclass_plat(bm->blk);
	lbd = dev_get_uclass_plat(lblk);
	if (lbd->blksz != bd->blksz)
		/* We could support block size translation, but we
		 * don't yet.
		 */
		return -EINVAL;

	linear = malloc(sizeof(*linear));
	if (!linear)
		return -ENOMEM;

	*linear = (struct blkmap_linear) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,

			.read = blkmap_linear_read,
			.write = blkmap_linear_write,
		},

		.blk = lblk,
		.blknr = lblknr,
	};

	err = blkmap_slice_add(bm, &linear->slice);
	if (err)
		free(linear);

	return err;
}

/**
 * struct blkmap_mem - Memory mapping
 *
 * @slice: Common map data
 * @addr: Target memory region of this mapping
 * @remapped: True if @addr is backed by a physical to virtual memory
 * mapping that must be torn down at the end of this mapping's
 * lifetime.
 */
struct blkmap_mem {
	struct blkmap_slice slice;
	void *addr;
	bool remapped;
};

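/*
 * Memory slices are backed by a buffer in memory; reads and writes
 * are plain memcpy()s to and from that buffer.
 */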
static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
			     lbaint_t blknr, lbaint_t blkcnt, void *buffer)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	char *src;

	src = bmm->addr + (blknr << bd->log2blksz);
	memcpy(buffer, src, blkcnt << bd->log2blksz);
	return blkcnt;
}

static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
			      lbaint_t blknr, lbaint_t blkcnt,
			      const void *buffer)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	char *dst;

	dst = bmm->addr + (blknr << bd->log2blksz);
	memcpy(dst, buffer, blkcnt << bd->log2blksz);
	return blkcnt;
}

static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
{
	struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);

	if (bmm->remapped)
		unmap_sysmem(bmm->addr);
}

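/*
 * Common implementation for memory mappings. @remapped records
 * whether @addr came from map_sysmem() and therefore must be unmapped
 * again by the slice's destroy callback.
 */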
int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		     void *addr, bool remapped)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_mem *bmm;
	int err;

	bmm = malloc(sizeof(*bmm));
	if (!bmm)
		return -ENOMEM;

	*bmm = (struct blkmap_mem) {
		.slice = {
			.blknr = blknr,
			.blkcnt = blkcnt,

			.read = blkmap_mem_read,
			.write = blkmap_mem_write,
			.destroy = blkmap_mem_destroy,
		},

		.addr = addr,
		.remapped = remapped,
	};

	err = blkmap_slice_add(bm, &bmm->slice);
	if (err)
		free(bmm);

	return err;
}

int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		   void *addr)
{
	return __blkmap_map_mem(dev, blknr, blkcnt, addr, false);
}

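/*
 * Map a region of physical memory into the blkmap. The region is
 * remapped into the virtual address space with map_sysmem() and
 * unmapped again by the slice's destroy callback.
 */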
int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
		    phys_addr_t paddr)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
	void *addr;
	int err;

	addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
	if (!addr)
		return -ENOMEM;

	err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true);
	if (err)
		unmap_sysmem(addr);

	return err;
}

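/*
 * Translate the device-relative block number to a slice-relative one
 * and cap the count at the slice size before calling the slice's read
 * callback.
 */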
static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
				   lbaint_t blknr, lbaint_t blkcnt,
				   void *buffer)
{
	lbaint_t nr, cnt;

	nr = blknr - bms->blknr;
	cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
	return bms->read(bm, bms, nr, cnt, buffer);
}

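/*
 * Serve a read by walking the slices in order and letting each slice
 * that contains the current block handle its part. Blocks that fall
 * outside every slice are not transferred, so the returned count may
 * be less than requested.
 */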
static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
			     lbaint_t blkcnt, void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}

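/* The write path mirrors the read path above */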
static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
				    lbaint_t blknr, lbaint_t blkcnt,
				    const void *buffer)
{
	lbaint_t nr, cnt;

	nr = blknr - bms->blknr;
	cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
	return bms->write(bm, bms, nr, cnt, buffer);
}

static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
			      lbaint_t blkcnt, const void *buffer)
{
	struct blk_desc *bd = dev_get_uclass_plat(dev);
	struct blkmap *bm = dev_get_plat(dev->parent);
	struct blkmap_slice *bms;
	lbaint_t cnt, total = 0;

	list_for_each_entry(bms, &bm->slices, node) {
		if (!blkmap_slice_contains(bms, blknr))
			continue;

		cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
		blknr += cnt;
		blkcnt -= cnt;
		buffer += cnt << bd->log2blksz;
		total += cnt;
	}

	return total;
}

static const struct blk_ops blkmap_blk_ops = {
	.read = blkmap_blk_read,
	.write = blkmap_blk_write,
};

U_BOOT_DRIVER(blkmap_blk) = {
	.name = "blkmap_blk",
	.id = UCLASS_BLK,
	.ops = &blkmap_blk_ops,
};

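/*
 * Create the backing UCLASS_BLK child device (512-byte blocks) and
 * initialize the empty slice list.
 */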
int blkmap_dev_bind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blk_desc *bd;
	int err;

	err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
				 dev_seq(dev), 512, 0, &bm->blk);
	if (err)
		return log_msg_ret("blk", err);

	INIT_LIST_HEAD(&bm->slices);

	bd = dev_get_uclass_plat(bm->blk);
	snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
	snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
	snprintf(bd->revision, BLK_REV_SIZE, "1.0");

	/* EFI core isn't keen on zero-sized disks, so we lie. This is
	 * updated with the correct size once the user adds a
	 * mapping.
	 */
	bd->lba = 1;

	return 0;
}

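/*
 * Tear down all slices, running each slice's destroy callback, then
 * remove and unbind the backing block device.
 */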
int blkmap_dev_unbind(struct udevice *dev)
{
	struct blkmap *bm = dev_get_plat(dev);
	struct blkmap_slice *bms, *tmp;
	int err;

	list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
		list_del(&bms->node);
		if (bms->destroy)
			bms->destroy(bm, bms);
		free(bms);
	}

	err = device_remove(bm->blk, DM_REMOVE_NORMAL);
	if (err)
		return err;

	return device_unbind(bm->blk);
}

U_BOOT_DRIVER(blkmap_root) = {
	.name = "blkmap_dev",
	.id = UCLASS_BLKMAP,
	.bind = blkmap_dev_bind,
	.unbind = blkmap_dev_unbind,
	.plat_auto = sizeof(struct blkmap),
};

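/* Look up an existing blkmap device by its label; NULL if no match */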
struct udevice *blkmap_from_label(const char *label)
{
	struct udevice *dev;
	struct uclass *uc;
	struct blkmap *bm;

	uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
		bm = dev_get_plat(dev);
		if (bm->label && !strcmp(label, bm->label))
			return dev;
	}

	return NULL;
}

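/*
 * Create a new blkmap device named "blkmap-<label>", bound to the
 * blkmap_dev driver. Fails with -EBUSY if a blkmap with the same
 * label already exists.
 */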
int blkmap_create(const char *label, struct udevice **devp)
{
	char *hname, *hlabel;
	struct udevice *dev;
	struct blkmap *bm;
	size_t namelen;
	int err;

	dev = blkmap_from_label(label);
	if (dev) {
		err = -EBUSY;
		goto err;
	}

	hlabel = strdup(label);
	if (!hlabel) {
		err = -ENOMEM;
		goto err;
	}

	namelen = strlen("blkmap-") + strlen(label) + 1;
	hname = malloc(namelen);
	if (!hname) {
		err = -ENOMEM;
		goto err_free_hlabel;
	}

	strlcpy(hname, "blkmap-", namelen);
	strlcat(hname, label, namelen);

	err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
	if (err)
		goto err_free_hname;

	device_set_name_alloced(dev);
	bm = dev_get_plat(dev);
	bm->label = hlabel;

	if (devp)
		*devp = dev;

	return 0;

err_free_hname:
	free(hname);
err_free_hlabel:
	free(hlabel);
err:
	return err;
}

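/*
 * Typical usage from C code (an illustrative sketch only; "ramdisk"
 * and "image_buf" below are made-up names, not part of this driver):
 *
 *	struct udevice *dev;
 *	int err;
 *
 *	err = blkmap_create("ramdisk", &dev);
 *	if (!err)
 *		err = blkmap_map_mem(dev, 0, 0x100, image_buf);
 *
 * Block reads and writes then go through the child UCLASS_BLK device
 * created in blkmap_dev_bind(), and the whole map can be released
 * again with blkmap_destroy(dev).
 */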
int blkmap_destroy(struct udevice *dev)
{
	int err;

	err = device_remove(dev, DM_REMOVE_NORMAL);
	if (err)
		return err;

	return device_unbind(dev);
}

UCLASS_DRIVER(blkmap) = {
	.id = UCLASS_BLKMAP,
	.name = "blkmap",
};