// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * VirtIO memory-mapped I/O transport driver
 * Ported from Linux drivers/virtio/virtio_mmio.c
 */

#include <common.h>
#include <dm.h>
#include <log.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/io.h>
#include "virtio_mmio.h"

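/*
 * virtio_mmio_get_config() - read the device-specific config space
 *
 * Legacy (version 1) devices are read a byte at a time; modern devices are
 * read with naturally sized little-endian accesses, with 64-bit fields split
 * into two 32-bit reads.
 */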
static int virtio_mmio_get_config(struct udevice *udev, unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);
        void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
        u8 b;
        __le16 w;
        __le32 l;

        if (priv->version == 1) {
                u8 *ptr = buf;
                int i;

                for (i = 0; i < len; i++)
                        ptr[i] = readb(base + offset + i);

                return 0;
        }

        switch (len) {
        case 1:
                b = readb(base + offset);
                memcpy(buf, &b, sizeof(b));
                break;
        case 2:
                w = cpu_to_le16(readw(base + offset));
                memcpy(buf, &w, sizeof(w));
                break;
        case 4:
                l = cpu_to_le32(readl(base + offset));
                memcpy(buf, &l, sizeof(l));
                break;
        case 8:
                l = cpu_to_le32(readl(base + offset));
                memcpy(buf, &l, sizeof(l));
                l = cpu_to_le32(readl(base + offset + sizeof(l)));
                memcpy(buf + sizeof(l), &l, sizeof(l));
                break;
        default:
                WARN_ON(true);
        }

        return 0;
}

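/*
 * virtio_mmio_set_config() - write the device-specific config space
 *
 * Mirrors virtio_mmio_get_config(): byte-wise writes for legacy devices,
 * little-endian writes of the natural width for modern devices.
 */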
static int virtio_mmio_set_config(struct udevice *udev, unsigned int offset,
                                  const void *buf, unsigned int len)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);
        void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
        u8 b;
        __le16 w;
        __le32 l;

        if (priv->version == 1) {
                const u8 *ptr = buf;
                int i;

                for (i = 0; i < len; i++)
                        writeb(ptr[i], base + offset + i);

                return 0;
        }

        switch (len) {
        case 1:
                memcpy(&b, buf, sizeof(b));
                writeb(b, base + offset);
                break;
        case 2:
                memcpy(&w, buf, sizeof(w));
                writew(le16_to_cpu(w), base + offset);
                break;
        case 4:
                memcpy(&l, buf, sizeof(l));
                writel(le32_to_cpu(l), base + offset);
                break;
        case 8:
                memcpy(&l, buf, sizeof(l));
                writel(le32_to_cpu(l), base + offset);
                memcpy(&l, buf + sizeof(l), sizeof(l));
                writel(le32_to_cpu(l), base + offset + sizeof(l));
                break;
        default:
                WARN_ON(true);
        }

        return 0;
}

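/*
 * virtio_mmio_generation() - read the config generation counter
 *
 * Legacy devices have no generation register, so always report 0 for them.
 */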
static int virtio_mmio_generation(struct udevice *udev, u32 *counter)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        if (priv->version == 1)
                *counter = 0;
        else
                *counter = readl(priv->base + VIRTIO_MMIO_CONFIG_GENERATION);

        return 0;
}

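/* virtio_mmio_get_status() - read the device status byte */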
static int virtio_mmio_get_status(struct udevice *udev, u8 *status)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        *status = readl(priv->base + VIRTIO_MMIO_STATUS) & 0xff;

        return 0;
}

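/* virtio_mmio_set_status() - write the device status byte */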
static int virtio_mmio_set_status(struct udevice *udev, u8 status)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        /* We should never be setting status to 0 */
        WARN_ON(status == 0);

        writel(status, priv->base + VIRTIO_MMIO_STATUS);

        return 0;
}

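/* virtio_mmio_reset() - reset the device by writing a zero status */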
static int virtio_mmio_reset(struct udevice *udev)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        /* 0 status means a reset */
        writel(0, priv->base + VIRTIO_MMIO_STATUS);

        return 0;
}

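/*
 * virtio_mmio_get_features() - read the 64-bit device feature bits
 *
 * The feature bits are banked: select bank 1 for the upper 32 bits and
 * bank 0 for the lower 32 bits.
 */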
static int virtio_mmio_get_features(struct udevice *udev, u64 *features)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        writel(1, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
        *features = readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
        *features <<= 32;

        writel(0, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
        *features |= readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);

        return 0;
}

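/*
 * virtio_mmio_set_features() - write the negotiated driver feature bits
 *
 * Rejects the combination of a modern (version 2) transport with a legacy
 * driver, then writes the upper and lower 32 feature bits to their banks.
 */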
static int virtio_mmio_set_features(struct udevice *udev)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);
        struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);

        /* Make sure there are no mixed devices */
        if (priv->version == 2 && uc_priv->legacy) {
                debug("New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
                return -EINVAL;
        }

        writel(1, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
        writel((u32)(uc_priv->features >> 32),
               priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

        writel(0, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
        writel((u32)uc_priv->features,
               priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

        return 0;
}

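/*
 * virtio_mmio_setup_vq() - allocate and activate one virtqueue
 *
 * Selects the queue, checks that it is not already in use, allocates the
 * vring and programs its location into the device: a single 32-bit page
 * frame number for legacy devices, or the descriptor/avail/used addresses
 * plus QUEUE_READY for modern devices.
 */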
static struct virtqueue *virtio_mmio_setup_vq(struct udevice *udev,
                                              unsigned int index)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);
        struct virtqueue *vq;
        unsigned int num;
        int err;

        /* Select the queue we're interested in */
        writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);

        /* Queue shouldn't already be set up */
        if (readl(priv->base + (priv->version == 1 ?
            VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
                err = -ENOENT;
                goto error_available;
        }

        num = readl(priv->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
        if (num == 0) {
                err = -ENOENT;
                goto error_new_virtqueue;
        }

        /* Create the vring */
        vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, udev);
        if (!vq) {
                err = -ENOMEM;
                goto error_new_virtqueue;
        }

        /* Activate the queue */
        writel(virtqueue_get_vring_size(vq),
               priv->base + VIRTIO_MMIO_QUEUE_NUM);
        if (priv->version == 1) {
                u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

                /*
                 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
                 * that doesn't fit in 32bit, fail the setup rather than
                 * pretending to be successful.
                 */
                if (q_pfn >> 32) {
                        debug("platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
                              0x1ULL << (32 + PAGE_SHIFT - 30));
                        err = -E2BIG;
                        goto error_bad_pfn;
                }

                writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_QUEUE_ALIGN);
                writel(q_pfn, priv->base + VIRTIO_MMIO_QUEUE_PFN);
        } else {
                u64 addr;

                addr = virtqueue_get_desc_addr(vq);
                writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
                writel((u32)(addr >> 32),
                       priv->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

                addr = virtqueue_get_avail_addr(vq);
                writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
                writel((u32)(addr >> 32),
                       priv->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

                addr = virtqueue_get_used_addr(vq);
                writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_USED_LOW);
                writel((u32)(addr >> 32),
                       priv->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

                writel(1, priv->base + VIRTIO_MMIO_QUEUE_READY);
        }

        return vq;

error_bad_pfn:
        vring_del_virtqueue(vq);

error_new_virtqueue:
        if (priv->version == 1) {
                writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
        } else {
                writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
                WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
        }

error_available:
        return ERR_PTR(err);
}

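/* virtio_mmio_del_vq() - deactivate one virtqueue and free its vring */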
static void virtio_mmio_del_vq(struct virtqueue *vq)
{
        struct virtio_mmio_priv *priv = dev_get_priv(vq->vdev);
        unsigned int index = vq->index;

        /* Select and deactivate the queue */
        writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
        if (priv->version == 1) {
                writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
        } else {
                writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
                WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
        }

        vring_del_virtqueue(vq);
}

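/* virtio_mmio_del_vqs() - tear down every virtqueue of this device */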
static int virtio_mmio_del_vqs(struct udevice *udev)
{
        struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
        struct virtqueue *vq, *n;

        list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
                virtio_mmio_del_vq(vq);

        return 0;
}

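/*
 * virtio_mmio_find_vqs() - set up nvqs virtqueues, undoing all of them
 * if any single queue fails to initialize
 */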
static int virtio_mmio_find_vqs(struct udevice *udev, unsigned int nvqs,
                                struct virtqueue *vqs[])
{
        int i;

        for (i = 0; i < nvqs; ++i) {
                vqs[i] = virtio_mmio_setup_vq(udev, i);
                if (IS_ERR(vqs[i])) {
                        virtio_mmio_del_vqs(udev);
                        return PTR_ERR(vqs[i]);
                }
        }

        return 0;
}

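/* virtio_mmio_notify() - kick the device after new buffers were queued */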
static int virtio_mmio_notify(struct udevice *udev, struct virtqueue *vq)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        /*
         * We write the queue's selector into the notification register
         * to signal the other end
         */
        writel(vq->index, priv->base + VIRTIO_MMIO_QUEUE_NOTIFY);

        return 0;
}

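/* virtio_mmio_ofdata_to_platdata() - read the register base from the DT node */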
static int virtio_mmio_ofdata_to_platdata(struct udevice *udev)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);

        priv->base = (void __iomem *)(ulong)dev_read_addr(udev);
        if (priv->base == (void __iomem *)FDT_ADDR_T_NONE)
                return -EINVAL;

        return 0;
}

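/*
 * virtio_mmio_probe() - validate the magic value, version and device ID,
 * then record the device/vendor IDs for the virtio uclass. A device ID
 * of 0 is a placeholder and is skipped without reporting an error.
 */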
static int virtio_mmio_probe(struct udevice *udev)
{
        struct virtio_mmio_priv *priv = dev_get_priv(udev);
        struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
        u32 magic;

        /* Check magic value */
        magic = readl(priv->base + VIRTIO_MMIO_MAGIC_VALUE);
        if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
                debug("(%s): wrong magic value 0x%08x!\n", udev->name, magic);
                return 0;
        }

        /* Check device version */
        priv->version = readl(priv->base + VIRTIO_MMIO_VERSION);
        if (priv->version < 1 || priv->version > 2) {
                debug("(%s): version %d not supported!\n",
                      udev->name, priv->version);
                return 0;
        }

        /* Check device ID */
        uc_priv->device = readl(priv->base + VIRTIO_MMIO_DEVICE_ID);
        if (uc_priv->device == 0) {
                /*
                 * virtio-mmio device with an ID 0 is a (dummy) placeholder
                 * with no function. End probing now with no error reported.
                 */
                return 0;
        }
        uc_priv->vendor = readl(priv->base + VIRTIO_MMIO_VENDOR_ID);

        if (priv->version == 1)
                writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

        debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
              uc_priv->device, uc_priv->vendor, priv->version);

        return 0;
}

static const struct dm_virtio_ops virtio_mmio_ops = {
        .get_config = virtio_mmio_get_config,
        .set_config = virtio_mmio_set_config,
        .generation = virtio_mmio_generation,
        .get_status = virtio_mmio_get_status,
        .set_status = virtio_mmio_set_status,
        .reset = virtio_mmio_reset,
        .get_features = virtio_mmio_get_features,
        .set_features = virtio_mmio_set_features,
        .find_vqs = virtio_mmio_find_vqs,
        .del_vqs = virtio_mmio_del_vqs,
        .notify = virtio_mmio_notify,
};

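/*
 * Bound via the generic "virtio,mmio" compatible; a typical node (for
 * example as generated by QEMU) looks roughly like:
 *
 *      virtio_mmio@10001000 {
 *              compatible = "virtio,mmio";
 *              reg = <0x10001000 0x1000>;
 *      };
 *
 * (The address and size shown here are illustrative only.)
 */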
static const struct udevice_id virtio_mmio_ids[] = {
        { .compatible = "virtio,mmio" },
        { }
};

U_BOOT_DRIVER(virtio_mmio) = {
        .name = "virtio-mmio",
        .id = UCLASS_VIRTIO,
        .of_match = virtio_mmio_ids,
        .ops = &virtio_mmio_ops,
        .probe = virtio_mmio_probe,
        .ofdata_to_platdata = virtio_mmio_ofdata_to_platdata,
        .priv_auto_alloc_size = sizeof(struct virtio_mmio_priv),
};