// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) 2007-2008 Samuel Thibault.
 * (C) Copyright 2020 EPAM Systems Inc.
 */
#include <blk.h>
#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <malloc.h>
#include <part.h>

#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <asm/xen/system.h>

#include <linux/bug.h>
#include <linux/compat.h>

#include <xen/events.h>
#include <xen/gnttab.h>
#include <xen/hvm.h>
#include <xen/xenbus.h>

#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#define DRV_NAME	"pvblock"
#define DRV_NAME_BLK	"pvblock_blk"

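/*
 * Minimal stand-ins for the POSIX open() access modes (U-Boot provides
 * no <fcntl.h>), plus the interval, in ms, used when polling for a free
 * ring slot.
 */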
#define O_RDONLY	00
#define O_RDWR		02
#define WAIT_RING_TO_MS	10

struct blkfront_info {
        u64 sectors;
        unsigned int sector_size;
        int mode;
        int info;
        int barrier;
        int flush;
};

/**
 * struct blkfront_dev - Struct representing blkfront device
 * @dom: Domain id
 * @ring: Front_ring structure
 * @ring_ref: The grant reference, allowing us to grant access
 *	      to the ring to the other end/domain
 * @evtchn: Event channel used to signal ring events
 * @handle: Virtual block device handle
 * @nodename: Device XenStore path in format "device/vbd/" + @devid
 * @backend: Backend XenStore path
 * @info: Device properties read from XenStore (struct blkfront_info)
 * @devid: Device id
 * @bounce_buffer: Buffer to bounce unaligned I/O requests through
 */
struct blkfront_dev {
        domid_t dom;

        struct blkif_front_ring ring;
        grant_ref_t ring_ref;
        evtchn_port_t evtchn;
        blkif_vdev_t handle;

        char *nodename;
        char *backend;
        struct blkfront_info info;
        unsigned int devid;
        u8 *bounce_buffer;
};

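/*
 * Platform data for one virtual block device: just the XenStore device
 * ID. It is allocated in on_new_vbd() and freed again in
 * pvblock_blk_probe() once the ID has been read.
 */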
struct blkfront_plat {
        unsigned int devid;
};

/**
 * struct blkfront_aiocb - AIO control block, describing one I/O request
 * @aio_dev: Blkfront device
 * @aio_buf: Memory buffer, which must be aligned to the @aio_dev
 *	     sector size
 * @aio_nbytes: Size of the AIO, which must be a multiple of the
 *		@aio_dev sector size
 * @aio_offset: Offset, which must be sector-aligned for @aio_dev
 * @data: Private data used to receive the response from the ring
 * @gref: Array of grant references
 * @n: Number of segments
 * @aio_cb: Completion callback, called when the response is consumed
 */
struct blkfront_aiocb {
        struct blkfront_dev *aio_dev;
        u8 *aio_buf;
        size_t aio_nbytes;
        off_t aio_offset;
        void *data;

        grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        int n;

        void (*aio_cb)(struct blkfront_aiocb *aiocb, int ret);
};

static void blkfront_sync(struct blkfront_dev *dev);

static void free_blkfront(struct blkfront_dev *dev)
{
        mask_evtchn(dev->evtchn);
        free(dev->backend);

        gnttab_end_access(dev->ring_ref);
        free(dev->ring.sring);

        unbind_evtchn(dev->evtchn);

        free(dev->bounce_buffer);
        free(dev->nodename);
        free(dev);
}

static int init_blkfront(unsigned int devid, struct blkfront_dev *dev)
{
        xenbus_transaction_t xbt;
        char *err = NULL;
        char *message = NULL;
        struct blkif_sring *s;
        int retry = 0;
        char *msg = NULL;
        char *c;
        char nodename[32];
        char path[ARRAY_SIZE(nodename) + strlen("/backend-id") + 1];

        sprintf(nodename, "device/vbd/%d", devid);

        memset(dev, 0, sizeof(*dev));
        dev->nodename = strdup(nodename);
        dev->devid = devid;

        snprintf(path, sizeof(path), "%s/backend-id", nodename);
        dev->dom = xenbus_read_integer(path);
        evtchn_alloc_unbound(dev->dom, NULL, dev, &dev->evtchn);

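        /*
         * The shared ring occupies one whole page; the backend maps it
         * through the grant reference published in XenStore below.
         */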
        s = (struct blkif_sring *)memalign(PAGE_SIZE, PAGE_SIZE);
        if (!s) {
                printf("Failed to allocate shared ring\n");
                goto error;
        }

        SHARED_RING_INIT(s);
        FRONT_RING_INIT(&dev->ring, s, PAGE_SIZE);

        dev->ring_ref = gnttab_grant_access(dev->dom, virt_to_pfn(s), 0);

again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                printf("Error starting transaction\n");
                free(err);
        }

        err = xenbus_printf(xbt, nodename, "ring-ref", "%u", dev->ring_ref);
        if (err) {
                message = "writing ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, nodename, "event-channel", "%u", dev->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, nodename, "protocol", "%s",
                            XEN_IO_PROTO_ABI_NATIVE);
        if (err) {
                message = "writing protocol";
                goto abort_transaction;
        }

        snprintf(path, sizeof(path), "%s/state", nodename);
        err = xenbus_switch_state(xbt, path, XenbusStateConnected);
        if (err) {
                message = "switching state";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0, &retry);
        free(err);
        if (retry)
                goto again;

        goto done;

abort_transaction:
        free(err);
        err = xenbus_transaction_end(xbt, 1, &retry);
        printf("Aborting transaction: error %s\n", message);
        goto error;

done:
        snprintf(path, sizeof(path), "%s/backend", nodename);
        msg = xenbus_read(XBT_NIL, path, &dev->backend);
        if (msg) {
                printf("Error %s when reading the backend path %s\n",
                       msg, path);
                goto error;
        }

        dev->handle = strtoul(strrchr(nodename, '/') + 1, NULL, 0);

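        /*
         * Wait for the backend to reach the Connected state and read its
         * parameters (mode, size, sector size, supported features) from
         * XenStore.
         */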
        {
                XenbusState state;
                char path[strlen(dev->backend) +
                        strlen("/feature-flush-cache") + 1];

                snprintf(path, sizeof(path), "%s/mode", dev->backend);
                msg = xenbus_read(XBT_NIL, path, &c);
                if (msg) {
                        printf("Error %s when reading the mode\n", msg);
                        goto error;
                }
                if (*c == 'w')
                        dev->info.mode = O_RDWR;
                else
                        dev->info.mode = O_RDONLY;
                free(c);

                snprintf(path, sizeof(path), "%s/state", dev->backend);

                msg = NULL;
                state = xenbus_read_integer(path);
                while (!msg && state < XenbusStateConnected)
                        msg = xenbus_wait_for_state_change(path, &state);
                if (msg || state != XenbusStateConnected) {
                        printf("backend not available, state=%d\n", state);
                        goto error;
                }

                snprintf(path, sizeof(path), "%s/info", dev->backend);
                dev->info.info = xenbus_read_integer(path);

                snprintf(path, sizeof(path), "%s/sectors", dev->backend);
                /*
                 * FIXME: read_integer returns an int, so disk size
                 * limited to 1TB for now
                 */
                dev->info.sectors = xenbus_read_integer(path);

                snprintf(path, sizeof(path), "%s/sector-size", dev->backend);
                dev->info.sector_size = xenbus_read_integer(path);

                snprintf(path, sizeof(path), "%s/feature-barrier",
                         dev->backend);
                dev->info.barrier = xenbus_read_integer(path);

                snprintf(path, sizeof(path), "%s/feature-flush-cache",
                         dev->backend);
                dev->info.flush = xenbus_read_integer(path);
        }
        unmask_evtchn(dev->evtchn);

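        /*
         * One-sector bounce buffer: pvblock_iop() routes I/O to/from
         * buffers that are not sector-aligned through this buffer, one
         * block at a time.
         */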
        dev->bounce_buffer = memalign(dev->info.sector_size,
                                      dev->info.sector_size);
        if (!dev->bounce_buffer) {
                printf("Failed to allocate bounce buffer\n");
                goto error;
        }

        debug("%llu sectors of %u bytes, bounce buffer at %p\n",
              dev->info.sectors, dev->info.sector_size,
              dev->bounce_buffer);

        return 0;

error:
        free(msg);
        free(err);
        free_blkfront(dev);
        return -ENODEV;
}

static void shutdown_blkfront(struct blkfront_dev *dev)
{
        char *err = NULL, *err2;
        XenbusState state;

        char path[strlen(dev->backend) + strlen("/state") + 1];
        char nodename[strlen(dev->nodename) + strlen("/event-channel") + 1];

        debug("Close " DRV_NAME ", device ID %d\n", dev->devid);

        blkfront_sync(dev);

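        /*
         * Walk the XenBus shutdown handshake with the backend:
         * Closing -> Closed -> Initialising, waiting for the backend to
         * follow at each step, so the device can be cleanly reopened
         * later.
         */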
        snprintf(path, sizeof(path), "%s/state", dev->backend);
        snprintf(nodename, sizeof(nodename), "%s/state", dev->nodename);

        err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosing);
        if (err) {
                printf("%s: error changing state to %d: %s\n", __func__,
                       XenbusStateClosing, err);
                goto close;
        }

        state = xenbus_read_integer(path);
        while (!err && state < XenbusStateClosing)
                err = xenbus_wait_for_state_change(path, &state);
        free(err);

        err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateClosed);
        if (err) {
                printf("%s: error changing state to %d: %s\n", __func__,
                       XenbusStateClosed, err);
                goto close;
        }

        state = xenbus_read_integer(path);
        while (state < XenbusStateClosed) {
                err = xenbus_wait_for_state_change(path, &state);
                free(err);
        }

        err = xenbus_switch_state(XBT_NIL, nodename, XenbusStateInitialising);
        if (err) {
                printf("%s: error changing state to %d: %s\n", __func__,
                       XenbusStateInitialising, err);
                goto close;
        }

        state = xenbus_read_integer(path);
        while (!err &&
               (state < XenbusStateInitWait || state >= XenbusStateClosed))
                err = xenbus_wait_for_state_change(path, &state);

close:
        free(err);

        snprintf(nodename, sizeof(nodename), "%s/ring-ref", dev->nodename);
        err2 = xenbus_rm(XBT_NIL, nodename);
        free(err2);
        snprintf(nodename, sizeof(nodename), "%s/event-channel", dev->nodename);
        err2 = xenbus_rm(XBT_NIL, nodename);
        free(err2);

        if (!err)
                free_blkfront(dev);
}

/**
 * blkfront_aio_poll() - AIO polling function.
 * @dev: Blkfront device
 *
 * Consume responses from the ring until none are pending. Responses are
 * read from the consumer pointer up to the producer pointer; the consumer
 * pointer is advanced after each response to mark it as handled.
 *
 * Return: Number of consumed responses.
 */
static int blkfront_aio_poll(struct blkfront_dev *dev)
{
        RING_IDX rp, cons;
        struct blkif_response *rsp;
        int more;
        int nr_consumed;

moretodo:
        rp = dev->ring.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */
        cons = dev->ring.rsp_cons;

        nr_consumed = 0;
        while (cons != rp) {
                struct blkfront_aiocb *aiocbp;
                int status;

                rsp = RING_GET_RESPONSE(&dev->ring, cons);
                nr_consumed++;

                aiocbp = (void *)(uintptr_t)rsp->id;
                status = rsp->status;

                switch (rsp->operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                {
                        int j;

                        if (status != BLKIF_RSP_OKAY)
                                printf("%s error %d on %s at offset %llu, num bytes %llu\n",
                                       rsp->operation == BLKIF_OP_READ ?
                                       "read" : "write",
                                       status, aiocbp->aio_dev->nodename,
                                       (unsigned long long)aiocbp->aio_offset,
                                       (unsigned long long)aiocbp->aio_nbytes);

                        for (j = 0; j < aiocbp->n; j++)
                                gnttab_end_access(aiocbp->gref[j]);

                        break;
                }

                case BLKIF_OP_WRITE_BARRIER:
                        if (status != BLKIF_RSP_OKAY)
                                printf("write barrier error %d\n", status);
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                        if (status != BLKIF_RSP_OKAY)
                                printf("flush error %d\n", status);
                        break;

                default:
                        printf("unrecognized block operation %d response (status %d)\n",
                               rsp->operation, status);
                        break;
                }

                dev->ring.rsp_cons = ++cons;
                /* Note: the callback frees aiocbp itself */
                if (aiocbp && aiocbp->aio_cb)
                        aiocbp->aio_cb(aiocbp, status ? -EIO : 0);
                if (dev->ring.rsp_cons != cons)
                        /* We reentered, so we must not continue here */
                        break;
        }

        RING_FINAL_CHECK_FOR_RESPONSES(&dev->ring, more);
        if (more)
                goto moretodo;

        return nr_consumed;
}

static void blkfront_wait_slot(struct blkfront_dev *dev)
{
        /* Wait for a slot */
        if (RING_FULL(&dev->ring)) {
                while (true) {
                        blkfront_aio_poll(dev);
                        if (!RING_FULL(&dev->ring))
                                break;
                        wait_event_timeout(NULL, !RING_FULL(&dev->ring),
                                           WAIT_RING_TO_MS);
                }
        }
}

/**
 * blkfront_aio() - Issue an aio.
 * @aiocbp: AIO control block structure
 * @write: Operation direction:
 *	   0 - read
 *	   1 - write
 *
 * We check that the AIO parameters meet the requirements of the device,
 * take a free request slot from the ring and fill in its arguments. Then
 * we grant the backend access to the data pages and, finally, notify it
 * about the AIO via the event channel.
 */
static void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
        struct blkfront_dev *dev = aiocbp->aio_dev;
        struct blkif_request *req;
        RING_IDX i;
        int notify;
        int n, j;
        uintptr_t start, end;

        /* Can't do I/O at a non-sector-aligned location */
        BUG_ON(aiocbp->aio_offset & (dev->info.sector_size - 1));
        /* Can't do I/O in non-sector-sized amounts */
        BUG_ON(aiocbp->aio_nbytes & (dev->info.sector_size - 1));
        /* Can't do I/O to a non-sector-aligned buffer */
        BUG_ON(((uintptr_t)aiocbp->aio_buf & (dev->info.sector_size - 1)));

        start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
        end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes +
               PAGE_SIZE - 1) & PAGE_MASK;
        n = (end - start) / PAGE_SIZE;
        aiocbp->n = n;

        BUG_ON(n > BLKIF_MAX_SEGMENTS_PER_REQUEST);

        blkfront_wait_slot(dev);
        i = dev->ring.req_prod_pvt;
        req = RING_GET_REQUEST(&dev->ring, i);

        req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
        req->nr_segments = n;
        req->handle = dev->handle;
        req->id = (uintptr_t)aiocbp;
        req->sector_number = aiocbp->aio_offset / dev->info.sector_size;

        for (j = 0; j < n; j++) {
                req->seg[j].first_sect = 0;
                req->seg[j].last_sect = PAGE_SIZE / dev->info.sector_size - 1;
        }
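        /*
         * The buffer may start and end in the middle of a page: trim the
         * first and last segments to the part of the page actually
         * covered by the buffer.
         */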
        req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) /
                dev->info.sector_size;
        req->seg[n - 1].last_sect = (((uintptr_t)aiocbp->aio_buf +
                aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / dev->info.sector_size;
        for (j = 0; j < n; j++) {
                uintptr_t data = start + j * PAGE_SIZE;

                if (!write) {
                        /* Trigger CoW if needed */
                        *(char *)(data + (req->seg[j].first_sect *
                                          dev->info.sector_size)) = 0;
                        barrier();
                }
                req->seg[j].gref = gnttab_grant_access(dev->dom,
                                                       virt_to_pfn((void *)data),
                                                       write);
                aiocbp->gref[j] = req->seg[j].gref;
        }

        dev->ring.req_prod_pvt = i + 1;

        wmb();
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

        if (notify)
                notify_remote_via_evtchn(dev->evtchn);
}

static void blkfront_aio_cb(struct blkfront_aiocb *aiocbp, int ret)
{
        aiocbp->data = (void *)1;
        aiocbp->aio_cb = NULL;
}

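/*
 * Synchronous I/O: submit the aio with blkfront_aio_cb() as the
 * completion callback and poll the ring until the callback marks ->data.
 */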
static void blkfront_io(struct blkfront_aiocb *aiocbp, int write)
{
        aiocbp->aio_cb = blkfront_aio_cb;
        blkfront_aio(aiocbp, write);
        aiocbp->data = NULL;

        while (true) {
                blkfront_aio_poll(aiocbp->aio_dev);
                if (aiocbp->data)
                        break;
                cpu_relax();
        }
}

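/*
 * Queue a zero-segment request (a write barrier or a cache flush) on the
 * ring and notify the backend.
 */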
static void blkfront_push_operation(struct blkfront_dev *dev, u8 op,
                                    uint64_t id)
{
        struct blkif_request *req;
        int notify, i;

        blkfront_wait_slot(dev);
        i = dev->ring.req_prod_pvt;
        req = RING_GET_REQUEST(&dev->ring, i);
        req->operation = op;
        req->nr_segments = 0;
        req->handle = dev->handle;
        req->id = id;
        req->sector_number = 0;
        dev->ring.req_prod_pvt = i + 1;
        wmb();
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
        if (notify)
                notify_remote_via_evtchn(dev->evtchn);
}

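/*
 * Flush outstanding writes, using a write barrier and/or a disk cache
 * flush if the backend advertises them, then drain the ring until every
 * request has completed.
 */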
static void blkfront_sync(struct blkfront_dev *dev)
{
        if (dev->info.mode == O_RDWR) {
                if (dev->info.barrier == 1)
                        blkfront_push_operation(dev,
                                                BLKIF_OP_WRITE_BARRIER, 0);

                if (dev->info.flush == 1)
                        blkfront_push_operation(dev,
                                                BLKIF_OP_FLUSH_DISKCACHE, 0);
        }

        while (true) {
                blkfront_aio_poll(dev);
                if (RING_FREE_REQUESTS(&dev->ring) == RING_SIZE(&dev->ring))
                        break;
                cpu_relax();
        }
}

/**
 * pvblock_iop() - Issue a block I/O operation.
 * @udev: Pvblock device
 * @blknr: Block number to read from / write to
 * @blkcnt: Amount of blocks to read / write
 * @buffer: Memory buffer with data to be read / written
 * @write: Operation direction:
 *	   0 - read
 *	   1 - write
 *
 * Depending on the operation, data is read from or written to @buffer,
 * starting at block @blknr of the device.
 */
static ulong pvblock_iop(struct udevice *udev, lbaint_t blknr,
                         lbaint_t blkcnt, void *buffer, int write)
{
        struct blkfront_dev *blk_dev = dev_get_priv(udev);
        struct blk_desc *desc = dev_get_uclass_plat(udev);
        struct blkfront_aiocb aiocb;
        lbaint_t blocks_todo;
        bool unaligned;

        if (blkcnt == 0)
                return 0;

        if ((blknr + blkcnt) > desc->lba) {
                printf(DRV_NAME ": block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
                       blknr + blkcnt, desc->lba);
                return 0;
        }

        unaligned = (uintptr_t)buffer & (blk_dev->info.sector_size - 1);

        aiocb.aio_dev = blk_dev;
        aiocb.aio_offset = blknr * desc->blksz;
        aiocb.aio_cb = NULL;
        aiocb.data = NULL;
        blocks_todo = blkcnt;
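        /*
         * Unaligned buffers are transferred one block at a time through
         * the bounce buffer; aligned buffers are transferred in chunks of
         * up to BLKIF_MAX_SEGMENTS_PER_REQUEST pages per request.
         */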
        do {
                aiocb.aio_buf = unaligned ? blk_dev->bounce_buffer : buffer;

                if (write && unaligned)
                        memcpy(blk_dev->bounce_buffer, buffer, desc->blksz);

                aiocb.aio_nbytes = unaligned ? desc->blksz :
                        min((size_t)(BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE),
                            (size_t)(blocks_todo * desc->blksz));

                blkfront_io(&aiocb, write);

                if (!write && unaligned)
                        memcpy(buffer, blk_dev->bounce_buffer, desc->blksz);

                aiocb.aio_offset += aiocb.aio_nbytes;
                buffer += aiocb.aio_nbytes;
                blocks_todo -= aiocb.aio_nbytes / desc->blksz;
        } while (blocks_todo > 0);

        return blkcnt;
}

ulong pvblock_blk_read(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
                       void *buffer)
{
        return pvblock_iop(udev, blknr, blkcnt, buffer, 0);
}

ulong pvblock_blk_write(struct udevice *udev, lbaint_t blknr, lbaint_t blkcnt,
                        const void *buffer)
{
        return pvblock_iop(udev, blknr, blkcnt, (void *)buffer, 1);
}

static int pvblock_blk_bind(struct udevice *udev)
{
        struct blk_desc *desc = dev_get_uclass_plat(udev);
        int devnum;

        desc->if_type = IF_TYPE_PVBLOCK;
        /*
         * Initialize the devnum to -ENODEV. This is to make sure that
         * blk_next_free_devnum() works as expected, since the default
         * value 0 is a valid devnum.
         */
        desc->devnum = -ENODEV;
        devnum = blk_next_free_devnum(IF_TYPE_PVBLOCK);
        if (devnum < 0)
                return devnum;
        desc->devnum = devnum;
        desc->part_type = PART_TYPE_UNKNOWN;
        desc->bdev = udev;

        strncpy(desc->vendor, "Xen", sizeof(desc->vendor));
        strncpy(desc->revision, "1", sizeof(desc->revision));
        strncpy(desc->product, "Virtual disk", sizeof(desc->product));

        return 0;
}

static int pvblock_blk_probe(struct udevice *udev)
{
        struct blkfront_dev *blk_dev = dev_get_priv(udev);
        struct blkfront_plat *plat = dev_get_plat(udev);
        struct blk_desc *desc = dev_get_uclass_plat(udev);
        int ret, devid;

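        /*
         * The platform data was allocated in on_new_vbd() and is only
         * needed to carry the device ID here, so free it right away.
         */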
        devid = plat->devid;
        free(plat);

        ret = init_blkfront(devid, blk_dev);
        if (ret < 0)
                return ret;

        desc->blksz = blk_dev->info.sector_size;
        desc->lba = blk_dev->info.sectors;
        desc->log2blksz = LOG2(blk_dev->info.sector_size);

        return 0;
}

static int pvblock_blk_remove(struct udevice *udev)
{
        struct blkfront_dev *blk_dev = dev_get_priv(udev);

        shutdown_blkfront(blk_dev);
        return 0;
}

static const struct blk_ops pvblock_blk_ops = {
        .read = pvblock_blk_read,
        .write = pvblock_blk_write,
};

U_BOOT_DRIVER(pvblock_blk) = {
        .name = DRV_NAME_BLK,
        .id = UCLASS_BLK,
        .ops = &pvblock_blk_ops,
        .bind = pvblock_blk_bind,
        .probe = pvblock_blk_probe,
        .remove = pvblock_blk_remove,
        .priv_auto = sizeof(struct blkfront_dev),
        .flags = DM_FLAG_OS_PREPARE,
};

/*******************************************************************************
 * Para-virtual block device class
 *******************************************************************************/

typedef int (*enum_vbd_callback)(struct udevice *parent, unsigned int devid);

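/*
 * Callback for xenbus_enumerate_vbd(): bind a pvblock_blk block device
 * for the virtual block device with the given XenStore device ID.
 */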
static int on_new_vbd(struct udevice *parent, unsigned int devid)
{
        struct driver_info info;
        struct udevice *udev;
        struct blkfront_plat *plat;
        int ret;

        debug("New " DRV_NAME_BLK ", device ID %d\n", devid);

        plat = malloc(sizeof(struct blkfront_plat));
        if (!plat) {
                printf("Failed to allocate platform data\n");
                return -ENOMEM;
        }

        plat->devid = devid;

        info.name = DRV_NAME_BLK;
        info.plat = plat;

        ret = device_bind_by_name(parent, false, &info, &udev);
        if (ret < 0) {
                printf("Failed to bind " DRV_NAME_BLK " to device with ID %d, ret: %d\n",
                       devid, ret);
                free(plat);
        }
        return ret;
}

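/*
 * Enumerate the virtual block devices: each entry under the "device/vbd"
 * XenStore directory is a decimal device ID, which is passed to @clb.
 */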
static int xenbus_enumerate_vbd(struct udevice *udev, enum_vbd_callback clb)
{
        char **dirs, *msg;
        int i, ret;

        msg = xenbus_ls(XBT_NIL, "device/vbd", &dirs);
        if (msg) {
                printf("Failed to read device/vbd directory: %s\n", msg);
                free(msg);
                return -ENODEV;
        }

        for (i = 0; dirs[i]; i++) {
                int devid;

                sscanf(dirs[i], "%d", &devid);
                ret = clb(udev, devid);
                if (ret < 0)
                        goto fail;

                free(dirs[i]);
        }
        ret = 0;

fail:
        for (; dirs[i]; i++)
                free(dirs[i]);
        free(dirs);
        return ret;
}

static void print_pvblock_devices(void)
{
        struct udevice *udev;
        bool first = true;
        const char *class_name;

        class_name = uclass_get_name(UCLASS_PVBLOCK);
        for (blk_first_device(IF_TYPE_PVBLOCK, &udev); udev;
             blk_next_device(&udev), first = false) {
                struct blk_desc *desc = dev_get_uclass_plat(udev);

                if (!first)
                        puts(", ");
                printf("%s: %d", class_name, desc->devnum);
        }
        printf("\n");
}

void pvblock_init(void)
{
        struct driver_info info;
        struct udevice *udev;
        struct uclass *uc;
        int ret;

        /*
         * At this point Xen drivers have already initialized,
         * so we can instantiate the class driver and enumerate
         * virtual block devices.
         */
        info.name = DRV_NAME;
        ret = device_bind_by_name(gd->dm_root, false, &info, &udev);
        if (ret < 0)
                printf("Failed to bind " DRV_NAME ", ret: %d\n", ret);

        /* Bootstrap virtual block devices class driver */
        ret = uclass_get(UCLASS_PVBLOCK, &uc);
        if (ret)
                return;
        uclass_foreach_dev_probe(UCLASS_PVBLOCK, udev);

        print_pvblock_devices();
}
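
/*
 * After pvblock_init() the enumerated disks behave as regular block
 * devices. As a usage sketch (assuming the 'pvblock' shell command from
 * cmd/pvblock.c is enabled via CONFIG_CMD_PVBLOCK):
 *
 *   => pvblock info
 *   => pvblock read ${loadaddr} 0 0x100
 */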

static int pvblock_probe(struct udevice *udev)
{
        struct uclass *uc;
        int ret;

        if (xenbus_enumerate_vbd(udev, on_new_vbd) < 0)
                return -ENODEV;

        ret = uclass_get(UCLASS_BLK, &uc);
        if (ret)
                return ret;
        uclass_foreach_dev_probe(UCLASS_BLK, udev) {
                if (_ret)
                        return _ret;
        }
        return 0;
}


U_BOOT_DRIVER(pvblock_drv) = {
        .name = DRV_NAME,
        .id = UCLASS_PVBLOCK,
        .probe = pvblock_probe,
};

UCLASS_DRIVER(pvblock) = {
        .name = DRV_NAME,
        .id = UCLASS_PVBLOCK,
};