// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * VirtIO memory-mapped I/O transport driver
 * Ported from Linux drivers/virtio/virtio_mmio.c
 */

#include <common.h>
#include <dm.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/compat.h>
#include <linux/io.h>
#include "virtio_mmio.h"

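/*
 * Read the device configuration space at the given offset.
 *
 * Legacy (version 1) devices expose the config space as a byte stream, so
 * it is copied one byte at a time. Modern (version 2) devices are accessed
 * with the field's own width; 64-bit fields are read as two 32-bit halves
 * and stored little-endian in the caller's buffer.
 */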
static int virtio_mmio_get_config(struct udevice *udev, unsigned int offset,
				  void *buf, unsigned int len)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (priv->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);

		return 0;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof(b));
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof(w));
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof(l));
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof(l));
		l = cpu_to_le32(readl(base + offset + sizeof(l)));
		memcpy(buf + sizeof(l), &l, sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}

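/*
 * Write to the device configuration space at the given offset.
 *
 * The mirror image of virtio_mmio_get_config(): byte-wise writes for
 * legacy (version 1) devices, width-matched little-endian writes for
 * modern (version 2) devices.
 */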
static int virtio_mmio_set_config(struct udevice *udev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (priv->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return 0;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof(b));
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof(w));
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof(l));
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof(l));
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof(l), sizeof(l));
		writel(le32_to_cpu(l), base + offset + sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}

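/* Legacy (version 1) devices have no config generation counter, report 0 */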
static int virtio_mmio_generation(struct udevice *udev, u32 *counter)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	if (priv->version == 1)
		*counter = 0;
	else
		*counter = readl(priv->base + VIRTIO_MMIO_CONFIG_GENERATION);

	return 0;
}

static int virtio_mmio_get_status(struct udevice *udev, u8 *status)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	*status = readl(priv->base + VIRTIO_MMIO_STATUS) & 0xff;

	return 0;
}

static int virtio_mmio_set_status(struct udevice *udev, u8 status)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/* We should never be setting status to 0 */
	WARN_ON(status == 0);

	writel(status, priv->base + VIRTIO_MMIO_STATUS);

	return 0;
}

static int virtio_mmio_reset(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/* 0 status means a reset */
	writel(0, priv->base + VIRTIO_MMIO_STATUS);

	return 0;
}

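/*
 * Read the 64-bit device feature word as two 32-bit halves, selecting the
 * high word first via DEVICE_FEATURES_SEL, then the low word.
 */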
static int virtio_mmio_get_features(struct udevice *udev, u64 *features)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	writel(1, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	*features = readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
	*features <<= 32;

	writel(0, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	*features |= readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return 0;
}

static int virtio_mmio_set_features(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);

	/* Make sure there are no mixed devices */
	if (priv->version == 2 && uc_priv->legacy) {
		debug("New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(uc_priv->features >> 32),
	       priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)uc_priv->features,
	       priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

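/*
 * Allocate and activate a single virtqueue.
 *
 * The queue is selected via QUEUE_SEL, its size is programmed into
 * QUEUE_NUM, and the vring is handed to the device either as a page
 * frame number (legacy, version 1) or as 64-bit descriptor/avail/used
 * addresses followed by QUEUE_READY (modern, version 2).
 */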
static struct virtqueue *virtio_mmio_setup_vq(struct udevice *udev,
					      unsigned int index)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtqueue *vq;
	unsigned int num;
	int err;

	/* Select the queue we're interested in */
	writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up */
	if (readl(priv->base + (priv->version == 1 ?
	    VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	num = readl(priv->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, udev);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq),
	       priv->base + VIRTIO_MMIO_QUEUE_NUM);
	if (priv->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			debug("platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
			      0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, priv->base + VIRTIO_MMIO_QUEUE_READY);
	}

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);

error_new_virtqueue:
	if (priv->version == 1) {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
	}

error_available:
	return ERR_PTR(err);
}

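/* Select a queue, deactivate it on the device side and free its vring */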
static void virtio_mmio_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_priv *priv = dev_get_priv(vq->vdev);
	unsigned int index = vq->index;

	/* Select and deactivate the queue */
	writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
	if (priv->version == 1) {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);
}

static int virtio_mmio_del_vqs(struct udevice *udev)
{
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
		virtio_mmio_del_vq(vq);

	return 0;
}

static int virtio_mmio_find_vqs(struct udevice *udev, unsigned int nvqs,
				struct virtqueue *vqs[])
{
	int i;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_mmio_setup_vq(udev, i);
		if (IS_ERR(vqs[i])) {
			virtio_mmio_del_vqs(udev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static int virtio_mmio_notify(struct udevice *udev, struct virtqueue *vq)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/*
	 * We write the queue's selector into the notification register
	 * to signal the other end
	 */
	writel(vq->index, priv->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return 0;
}

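/* Pick up the register base from the "reg" property in the device tree */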
static int virtio_mmio_ofdata_to_platdata(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	priv->base = (void __iomem *)(ulong)dev_read_addr(udev);
	if (priv->base == (void __iomem *)FDT_ADDR_T_NONE)
		return -EINVAL;

	return 0;
}

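/*
 * Validate the transport and read the device identity.
 *
 * A wrong magic value, an unsupported version or a device ID of 0 (a
 * placeholder slot with no backend) ends probing quietly, so scanning a
 * board with empty virtio-mmio slots does not report errors.
 */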
static int virtio_mmio_probe(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	u32 magic;

	/* Check magic value */
	magic = readl(priv->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		debug("(%s): wrong magic value 0x%08x!\n", udev->name, magic);
		return 0;
	}

	/* Check device version */
	priv->version = readl(priv->base + VIRTIO_MMIO_VERSION);
	if (priv->version < 1 || priv->version > 2) {
		debug("(%s): version %d not supported!\n",
		      udev->name, priv->version);
		return 0;
	}

	/* Check device ID */
	uc_priv->device = readl(priv->base + VIRTIO_MMIO_DEVICE_ID);
	if (uc_priv->device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return 0;
	}
	uc_priv->vendor = readl(priv->base + VIRTIO_MMIO_VENDOR_ID);

	if (priv->version == 1)
		writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
	      uc_priv->device, uc_priv->vendor, priv->version);

	return 0;
}

static const struct dm_virtio_ops virtio_mmio_ops = {
	.get_config	= virtio_mmio_get_config,
	.set_config	= virtio_mmio_set_config,
	.generation	= virtio_mmio_generation,
	.get_status	= virtio_mmio_get_status,
	.set_status	= virtio_mmio_set_status,
	.reset		= virtio_mmio_reset,
	.get_features	= virtio_mmio_get_features,
	.set_features	= virtio_mmio_set_features,
	.find_vqs	= virtio_mmio_find_vqs,
	.del_vqs	= virtio_mmio_del_vqs,
	.notify		= virtio_mmio_notify,
};

static const struct udevice_id virtio_mmio_ids[] = {
	{ .compatible = "virtio,mmio" },
	{ }
};

U_BOOT_DRIVER(virtio_mmio) = {
	.name	= "virtio-mmio",
	.id	= UCLASS_VIRTIO,
	.of_match = virtio_mmio_ids,
	.ops	= &virtio_mmio_ops,
	.probe	= virtio_mmio_probe,
	.ofdata_to_platdata = virtio_mmio_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct virtio_mmio_priv),
};