// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) 2007-2008 Samuel Thibault.
 * (C) Copyright 2020 EPAM Systems Inc.
 */
#include <blk.h>
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <part.h>

#include <asm/armv8/mmu.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/xen/system.h>

#include <linux/bug.h>
#include <linux/compat.h>

#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/hvm.h>
#include <xen/xenbus.h>

#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_NAME	"pvblock"
#define DRV_NAME_BLK	"pvblock_blk"

#define O_RDONLY	00
#define O_RDWR		02
#define WAIT_RING_TO_MS	10

struct blkfront_info {
	u64 sectors;
	unsigned int sector_size;
	int mode;
	int info;
	int barrier;
	int flush;
};

/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Virtual block device handle, passed with each ring request
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Private data
 * @devid: Device id
 * @bounce_buffer: Sector-aligned buffer used for unaligned transfers
 */
struct blkfront_dev {
	domid_t dom;

	struct blkif_front_ring ring;
	grant_ref_t ring_ref;
	evtchn_port_t evtchn;
	blkif_vdev_t handle;

	char *nodename;
	char *backend;
	struct blkfront_info info;
	unsigned int devid;
	u8 *bounce_buffer;
};

struct blkfront_plat {
	unsigned int devid;
};

/**
 * struct blkfront_aiocb - AIO control block
 * @aio_dev: Blockfront device
 * @aio_buf: Memory buffer, which must be aligned to the @aio_dev
 *	     sector size
 * @aio_nbytes: Size of the AIO, which must be a multiple of the
 *		@aio_dev sector size
 * @aio_offset: Offset, which must be aligned to the @aio_dev
 *		sector size
 * @data: Private data used when receiving the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Completion callback invoked when the I/O request finishes
 */
struct blkfront_aiocb {
	struct blkfront_dev *aio_dev;
	u8 *aio_buf;
	size_t aio_nbytes;
	off_t aio_offset;
	void *data;

	grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int n;

	void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};

static void blkfront_sync(struct blkfront_dev *dev);

static void free_blkfront(struct blkfront_dev *dev)
{
	mask_evtchn(dev->evtchn);
	free(dev->backend);

	gnttab_end_access(dev->ring_ref);
	free(dev->ring.sring);

	unbind_evtchn(dev->evtchn);

	free(dev->bounce_buffer);
	free(dev->nodename);
	free(dev);
}

static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
{
	xenbus_transaction_t xbt;
	char *err = NULL;
	char *message = NULL;
	struct blkif_sring *s;
	int retry = 0;
	char *msg = NULL;
	char *c;
	char nodename[32];
	char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];

	sprintf(nodename, "device/vbd/%d", devid);

	memset(dev, 0, sizeof(*dev));
	dev->nodename = strdup(nodename);
	dev->devid = devid;

	snprintf(path, sizeof(path), "%s/backend-id", nodename);
	dev->dom = xenbus_read_integer(path);
	evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);

	s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
	if (!s) {
		printf("Failed to allocate shared ring\n");
		goto error;
	}

	SHARED_RING_INIT(s);
	FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);

	dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);

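	/*
	 * Publish the frontend configuration in XenStore within a single
	 * transaction: the grant reference of the shared ring, the event
	 * channel and the ring protocol ABI, then switch the frontend to
	 * the Connected state. The transaction is retried when XenStore
	 * was modified concurrently.
	 */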
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printf("starting transaction\n");
		free(err);
	}

	err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	snprintf(path, sizeof(path), "%s/state", nodename);
	err = xenbus_switch_state(xbt, path, XenbusStateConnected);
	if (err) {
		message = "switching state";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0, &retry);
	free(err);
	if (retry)
		goto again;

	goto done;

abort_transaction:
	free(err);
	err = xenbus_transaction_end(xbt, 1, &retry);
	printf("Abort transaction %s\n", message);
	goto error;

done:
	snprintf(path, sizeof(path), "%s/backend", nodename);
	msg = xenbus_read(XBT_NIL, path, &dev->backend);
	if (msg) {
		printf("Error %s when reading the backend path %s\n",
		       msg, path);
		goto error;
	}

	dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);

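	/*
	 * The backend path is known now: read the disk mode, wait for the
	 * backend to reach the Connected state, then fetch the geometry
	 * and feature flags it advertises in XenStore.
	 */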
	{
		XenbusState state;
		char path[strlen(dev->backend) +
			strlen("/feature-flush-cache") + 1];

		snprintf(path, sizeof(path), "%s/mode", dev->backend);
		msg = xenbus_read(XBT_NIL, path, &c);
		if (msg) {
			printf("Error %s when reading the mode\n", msg);
			goto error;
		}
		if (*c == 'w')
			dev->info.mode = O_RDWR;
		else
			dev->info.mode = O_RDONLY;
		free(c);

		snprintf(path, sizeof(path), "%s/state", dev->backend);

		msg = NULL;
		state = xenbus_read_integer(path);
		while (!msg && state < XenbusStateConnected)
			msg = xenbus_wait_for_state_change(path, &state);
		if (msg || state != XenbusStateConnected) {
			printf("backend not available, state=%d\n", state);
			goto error;
		}

		snprintf(path, sizeof(path), "%s/info", dev->backend);
		dev->info.info = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sectors", dev->backend);
		/*
		 * FIXME: read_integer returns an int, so disk size
		 * limited to 1TB for now
		 */
		dev->info.sectors = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
		dev->info.sector_size = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-barrier",
			 dev->backend);
		dev->info.barrier = xenbus_read_integer(path);

		snprintf(path, sizeof(path), "%s/feature-flush-cache",
			 dev->backend);
		dev->info.flush = xenbus_read_integer(path);
	}
	unmask_evtchn(dev->evtchn);

	dev->bounce_buffer = memalign(dev->info.sector_size,
				      dev->info.sector_size);
	if (!dev->bounce_buffer) {
		printf("Failed to allocate bounce buffer\n");
		goto error;
	}

	debug("%llu sectors of %u bytes, bounce buffer at %p\n",
	      dev->info.sectors, dev->info.sector_size,
	      dev->bounce_buffer);

	return 0;

error:
	free(msg);
	free(err);
	free_blkfront(dev);
	return -ENODEV;
}

static void shutdown_blkfront(struct blkfront_dev *dev)
{
	char *err = NULL, *err2;
	XenbusState state;

	char path[strlen(dev->backend) + strlen("/state") + 1];
	char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

	debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

	blkfront_sync(dev);

	snprintf(path, sizeof(path), "%s/state", dev->backend);
	snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

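	/*
	 * Tear down the connection with the usual XenBus handshake: the
	 * frontend walks Closing -> Closed -> Initialising and waits for
	 * the backend to follow at every step.
	 */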
	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosing, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err && state < XenbusStateClosing)
		err = xenbus_wait_for_state_change(path, &state);
	free(err);

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateClosed, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (state < XenbusStateClosed) {
		err = xenbus_wait_for_state_change(path, &state);
		free(err);
	}

	err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
	if (err) {
		printf("%s: error changing state to %d: %s\n", __func__,
		       XenbusStateInitialising, err);
		goto close;
	}

	state = xenbus_read_integer(path);
	while (!err &&
	       (state < XenbusStateInitWait || state >= XenbusStateClosed))
		err = xenbus_wait_for_state_change(path, &state);

close:
	free(err);

	snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);
	snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
	err2 = xenbus_rm(XBT_NIL, nodename);
	free(err2);

	if (!err)
		free_blkfront(dev);
}

/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Here we receive responses from the ring and check their status. This
 * happens until all queued responses have been read: responses are read
 * from the consumer pointer up to the producer pointer, and the consumer
 * pointer is then advanced to mark the data as consumed.
 *
 * Return: Number of consumed responses.
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
	RING_IDX rp, cons;
	struct blkif_response *rsp;
	int more;
	int nr_consumed;

moretodo:
	rp = dev->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	cons = dev->ring.rsp_cons;

	nr_consumed = 0;
	while (cons != rp) {
		struct blkfront_aiocb *aiocbp;
		int status;

		rsp = RING_GET_RESPONSE(&dev->ring, cons);
		nr_consumed++;

		aiocbp = (void *)(uintptr_t)rsp->id;
		status = rsp->status;

		switch (rsp->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		{
			int j;

			if (status != BLKIF_RSP_OKAY)
				printf("%s error %d on %s at offset %llu, num bytes %llu\n",
				       rsp->operation == BLKIF_OP_READ ?
				       "read" : "write",
				       status, aiocbp->aio_dev->nodename,
				       (unsigned long long)aiocbp->aio_offset,
				       (unsigned long long)aiocbp->aio_nbytes);

			for (j = 0; j < aiocbp->n; j++)
				gnttab_end_access(aiocbp->gref[j]);

			break;
		}

		case BLKIF_OP_WRITE_BARRIER:
			if (status != BLKIF_RSP_OKAY)
				printf("write barrier error %d\n", status);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			if (status != BLKIF_RSP_OKAY)
				printf("flush error %d\n", status);
			break;

		default:
			printf("unrecognized block operation %d response (status %d)\n",
			       rsp->operation, status);
			break;
		}

		dev->ring.rsp_cons = ++cons;
		/* Note: the callback may free aiocbp itself */
		if (aiocbp && aiocbp->aio_cb)
			aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
		if (dev->ring.rsp_cons != cons)
			/* We reentered, we must not continue here */
			break;
	}

	RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
	if (more)
		goto moretodo;

	return nr_consumed;
}

static void blkfront_wait_slot(struct blkfront_dev *dev)
{
	/* Wait for a slot */
	if (RING_FULL(&dev->ring)) {
		while (true) {
			blkfront_aio_poll(dev);
			if (!RING_FULL(&dev->ring))
				break;
			wait_event_timeout(NULL, !RING_FULL(&dev->ring),
					   WAIT_RING_TO_MS);
		}
	}
}

/**
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Describes whether it is a read or a write operation
 *	   0 - read
 *	   1 - write
 *
 * We check whether the AIO parameters meet the requirements of the device.
 * Then we take a free request slot from the ring, fill in its arguments and
 * grant the backend access to the data pages. The last step is notifying
 * the backend about the AIO via the event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
	struct blkfront_dev *dev = aiocbp->aio_dev;
	struct blkif_request *req;
	RING_IDX i;
	int notify;
	int n, j;
	uintptr_t start, end;

	/* Can't do I/O at a non-sector-aligned location */
	BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
	/* Can't do I/O in non-sector-sized amounts */
	BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
	/* Can't do I/O to a non-sector-aligned buffer */
	BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

	start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
	end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
	       PAGE_SIZE - 1) & PAGE_MASK;
	n = (end - start) / PAGE_SIZE;
	aiocbp->n = n;

	BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);

	req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
	req->nr_segments = n;
	req->handle = dev->handle;
	req->id = (uintptr_t)aiocbp;
	req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

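	/*
	 * Each segment describes one page of the buffer: inner segments
	 * cover whole pages, while the first and last segments are trimmed
	 * below to the actual start and end sectors of a page-unaligned
	 * buffer.
	 */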
	for (j = 0; j < n; j++) {
		req->seg[j].first_sect = 0;
		req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
	}
	req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
		dev->info.sector_size;
	req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
		aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;
	for (j = 0; j < n; j++) {
		uintptr_t data = start + j * PAGE_SIZE;

		if (!write) {
			/* Trigger CoW if needed */
			*(char *)(data + (req->seg[j].first_sect *
					  dev->info.sector_size)) = 0;
			barrier();
		}
		req->seg[j].gref = gnttab_grant_access(dev->dom,
						       virt_to_pfn((void *)data),
						       write);
		aiocbp->gref[j] = req->seg[j].gref;
	}

	dev->ring.req_prod_pvt = i + 1;

	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}

static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
{
	aiocbp->data = (void *)1;
	aiocbp->aio_cb = NULL;
}

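/*
 * Synchronous I/O wrapper: issue the AIO request and poll the ring until
 * the completion callback above has fired.
 */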
static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
	aiocbp->aio_cb = blkfront_aio_cb;
	blkfront_aio(aiocbp, write);
	aiocbp->data = NULL;

	while (true) {
		blkfront_aio_poll(aiocbp->aio_dev);
		if (aiocbp->data)
			break;
		cpu_relax();
	}
}

static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
				    uint64_t id)
{
	struct blkif_request *req;
	int notify, i;

	blkfront_wait_slot(dev);
	i = dev->ring.req_prod_pvt;
	req = RING_GET_REQUEST(&dev->ring, i);
	req->operation = op;
	req->nr_segments = 0;
	req->handle = dev->handle;
	req->id = id;
	req->sector_number = 0;
	dev->ring.req_prod_pvt = i + 1;
	wmb();
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
	if (notify)
		notify_remote_via_evtchn(dev->evtchn);
}

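/*
 * Flush outstanding writes (with a write barrier or a disk cache flush if
 * the backend advertises the corresponding feature) and drain the ring
 * until all in-flight requests have completed.
 */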
static void blkfront_sync(struct blkfront_dev *dev)
{
	if (dev->info.mode == O_RDWR) {
		if (dev->info.barrier == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_WRITE_BARRIER, 0);

		if (dev->info.flush == 1)
			blkfront_push_operation(dev,
						BLKIF_OP_FLUSH_DISKCACHE, 0);
	}

	while (true) {
		blkfront_aio_poll(dev);
		if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
			break;
		cpu_relax();
	}
}

/**
 * pvblock_iop() - Issue an aio.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with data to be read / written
 * @write: Describes whether it is a read or a write operation
 *	   0 - read
 *	   1 - write
 *
 * Depending on the operation, data is read from or written to the memory
 * buffer (@buffer) starting at the given sector (@blknr).
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
			 lbaint_t blkcnt, void *buffer, int write)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	struct blkfront_aiocb aiocb;
	lbaint_t blocks_todo;
	bool unaligned;

	if (blkcnt == 0)
		return 0;

	if ((blknr + blkcnt) > desc->lba) {
		printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       blknr + blkcnt, desc->lba);
		return 0;
	}

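	/*
	 * A buffer that is not sector-aligned cannot be handed to the
	 * backend directly, so such transfers go through the single-sector
	 * bounce buffer, one block per request.
	 */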
	unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

	aiocb.aio_dev = blk_dev;
	aiocb.aio_offset = blknr * desc->blksz;
	aiocb.aio_cb = NULL;
	aiocb.data = NULL;
	blocks_todo = blkcnt;
	do {
		aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

		if (write && unaligned)
			memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

		aiocb.aio_nbytes = unaligned ? desc->blksz :
			min((size_t)(BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE),
			    (size_t)(blocks_todo * desc->blksz));

		blkfront_io(&aiocb, write);

		if (!write && unaligned)
			memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

		aiocb.aio_offset += aiocb.aio_nbytes;
		buffer += aiocb.aio_nbytes;
		blocks_todo -= aiocb.aio_nbytes / desc->blksz;
	} while (blocks_todo > 0);

	return blkcnt;
}

ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
		       void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, buffer, 0);
}

ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
			const void *buffer)
{
	return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
}

static int pvblock_blk_bind(struct udevice *udev)
{
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int devnum;

	desc->if_type = IF_TYPE_PVBLOCK;
	/*
	 * Initialize the devnum to -ENODEV. This is to make sure that
	 * blk_next_free_devnum() works as expected, since the default
	 * value 0 is a valid devnum.
	 */
	desc->devnum = -ENODEV;
	devnum = blk_next_free_devnum(IF_TYPE_PVBLOCK);
	if (devnum < 0)
		return devnum;
	desc->devnum = devnum;
	desc->part_type = PART_TYPE_UNKNOWN;
	desc->bdev = udev;

	strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
	strncpy(desc->revision, "1", sizeof(desc->revision));
	strncpy(desc->product, "Virtual disk", sizeof(desc->product));

	return 0;
}

static int pvblock_blk_probe(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);
	struct blkfront_plat *plat = dev_get_plat(udev);
	struct blk_desc *desc = dev_get_uclass_plat(udev);
	int ret, devid;

	devid = plat->devid;
	free(plat);

	ret = init_blkfront(devid, blk_dev);
	if (ret < 0)
		return ret;

	desc->blksz = blk_dev->info.sector_size;
	desc->lba = blk_dev->info.sectors;
	desc->log2blksz = LOG2(blk_dev->info.sector_size);

	return 0;
}

static int pvblock_blk_remove(struct udevice *udev)
{
	struct blkfront_dev *blk_dev = dev_get_priv(udev);

	shutdown_blkfront(blk_dev);
	return 0;
}

static const struct blk_ops pvblock_blk_ops = {
	.read = pvblock_blk_read,
	.write = pvblock_blk_write,
};

U_BOOT_DRIVER(pvblock_blk) = {
	.name = DRV_NAME_BLK,
	.id = UCLASS_BLK,
	.ops = &pvblock_blk_ops,
	.bind = pvblock_blk_bind,
	.probe = pvblock_blk_probe,
	.remove = pvblock_blk_remove,
	.priv_auto = sizeof(struct blkfront_dev),
	.flags = DM_FLAG_OS_PREPARE,
};

/*******************************************************************************
 * Para-virtual block device class
 *******************************************************************************/

typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);

static int on_new_vbd(struct udevice *parent, unsigned int devid)
{
	struct driver_info info;
	struct udevice *udev;
	struct blkfront_plat *plat;
	int ret;

	debug("New " DRV_NAME_BLK ", device ID %d\n", devid);

	plat = malloc(sizeof(struct blkfront_plat));
	if (!plat) {
		printf("Failed to allocate platform data\n");
		return -ENOMEM;
	}

	plat->devid = devid;

	info.name = DRV_NAME_BLK;
	info.plat = plat;

	ret = device_bind_by_name(parent, false, &info, &udev);
	if (ret < 0) {
		printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
		       devid, ret);
		free(plat);
	}
	return ret;
}

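/*
 * Enumerate the virtual block devices listed under "device/vbd" in XenStore
 * and invoke the callback for each device ID found.
 */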
static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
{
	char **dirs, *msg;
	int i, ret;

	msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
	if (msg) {
		printf("Failed to read device/vbd directory: %s\n", msg);
		free(msg);
		return -ENODEV;
	}

	for (i = 0; dirs[i]; i++) {
		int devid;

		sscanf(dirs[i], "%d", &devid);
		ret = clb(udev, devid);
		if (ret < 0)
			goto fail;

		free(dirs[i]);
	}
	ret = 0;

fail:
	for (; dirs[i]; i++)
		free(dirs[i]);
	free(dirs);
	return ret;
}

static void print_pvblock_devices(void)
{
	struct udevice *udev;
	bool first = true;
	const char *class_name;

	class_name = uclass_get_name(UCLASS_PVBLOCK);
	for (blk_first_device(IF_TYPE_PVBLOCK, &udev); udev;
	     blk_next_device(&udev), first = false) {
		struct blk_desc *desc = dev_get_uclass_plat(udev);

		if (!first)
			puts(", ");
		printf("%s: %d", class_name, desc->devnum);
	}
	printf("\n");
}

void pvblock_init(void)
{
	struct driver_info info;
	struct udevice *udev;
	struct uclass *uc;
	int ret;

	/*
	 * At this point Xen drivers have already initialized,
	 * so we can instantiate the class driver and enumerate
	 * virtual block devices.
	 */
	info.name = DRV_NAME;
	ret = device_bind_by_name(gd->dm_root, false, &info, &udev);
	if (ret < 0)
		printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);

	/* Bootstrap virtual block devices class driver */
	ret = uclass_get(UCLASS_PVBLOCK, &uc);
	if (ret)
		return;
	uclass_foreach_dev_probe(UCLASS_PVBLOCK, udev);

	print_pvblock_devices();
}

static int pvblock_probe(struct udevice *udev)
{
	struct uclass *uc;
	int ret;

	if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)
		return -ENODEV;

	ret = uclass_get(UCLASS_BLK, &uc);
	if (ret)
		return ret;
	uclass_foreach_dev_probe(UCLASS_BLK, udev) {
		if (_ret)
			return _ret;
	}
	return 0;
}

U_BOOT_DRIVER(pvblock_drv) = {
	.name = DRV_NAME,
	.id = UCLASS_PVBLOCK,
	.probe = pvblock_probe,
};

UCLASS_DRIVER(pvblock) = {
	.name = DRV_NAME,
	.id = UCLASS_PVBLOCK,
};