// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * VirtIO memory-mapped I/O transport driver
 * Ported from Linux drivers/virtio/virtio_mmio.c
 */

#include <common.h>
#include <dm.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/io.h>
#include "virtio_mmio.h"

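/*
 * Read from the device-specific config space.
 *
 * Legacy (version 1) devices expose the config space as a stream of bytes,
 * so copy it out byte by byte. Modern (version 2) devices must be accessed
 * with reads matching the field width; the result is stored in the caller's
 * buffer in little-endian order.
 */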
static int virtio_mmio_get_config(struct udevice *udev, unsigned int offset,
				  void *buf, unsigned int len)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (priv->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);

		return 0;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof(b));
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof(w));
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof(l));
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof(l));
		l = cpu_to_le32(readl(base + offset + sizeof(l)));
		memcpy(buf + sizeof(l), &l, sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}

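/*
 * Write to the device-specific config space, mirroring the access rules of
 * virtio_mmio_get_config(): byte-wise writes for legacy devices,
 * width-matched little-endian writes for modern ones.
 */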
static int virtio_mmio_set_config(struct udevice *udev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	void __iomem *base = priv->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (priv->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return 0;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof(b));
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof(w));
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof(l));
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof(l));
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof(l), sizeof(l));
		writel(le32_to_cpu(l), base + offset + sizeof(l));
		break;
	default:
		WARN_ON(true);
	}

	return 0;
}

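/*
 * Return the device config generation counter. Legacy devices have no such
 * register, so always report 0 for them.
 */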
static int virtio_mmio_generation(struct udevice *udev, u32 *counter)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	if (priv->version == 1)
		*counter = 0;
	else
		*counter = readl(priv->base + VIRTIO_MMIO_CONFIG_GENERATION);

	return 0;
}

static int virtio_mmio_get_status(struct udevice *udev, u8 *status)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	*status = readl(priv->base + VIRTIO_MMIO_STATUS) & 0xff;

	return 0;
}

static int virtio_mmio_set_status(struct udevice *udev, u8 status)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/* We should never be setting status to 0 */
	WARN_ON(status == 0);

	writel(status, priv->base + VIRTIO_MMIO_STATUS);

	return 0;
}

static int virtio_mmio_reset(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/* 0 status means a reset */
	writel(0, priv->base + VIRTIO_MMIO_STATUS);

	return 0;
}

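/*
 * Read the 64-bit device feature bits, high word first, selecting each
 * 32-bit word with DEVICE_FEATURES_SEL before reading DEVICE_FEATURES.
 */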
static int virtio_mmio_get_features(struct udevice *udev, u64 *features)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	writel(1, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	*features = readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);
	*features <<= 32;

	writel(0, priv->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	*features |= readl(priv->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return 0;
}

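/*
 * Write the negotiated driver feature bits back to the device, one 32-bit
 * word at a time via DRIVER_FEATURES_SEL, after checking that a modern
 * (version 2) device is not being driven as a legacy one.
 */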
static int virtio_mmio_set_features(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);

	/* Make sure there are no mixed devices */
	if (priv->version == 2 && uc_priv->legacy) {
		debug("New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(uc_priv->features >> 32),
	       priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, priv->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)uc_priv->features,
	       priv->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

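/*
 * Allocate a vring for the given queue index and hand it to the device.
 *
 * Legacy devices are programmed with a 32-bit guest page frame number
 * (QUEUE_PFN) and an alignment, while modern devices take the 64-bit
 * addresses of the descriptor, available and used rings and are enabled
 * via QUEUE_READY.
 */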
static struct virtqueue *virtio_mmio_setup_vq(struct udevice *udev,
					      unsigned int index)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtqueue *vq;
	unsigned int num;
	int err;

	/* Select the queue we're interested in */
	writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up */
	if (readl(priv->base + (priv->version == 1 ?
	    VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	num = readl(priv->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, udev);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq),
	       priv->base + VIRTIO_MMIO_QUEUE_NUM);
	if (priv->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			debug("platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
			      0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, priv->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
		       priv->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, priv->base + VIRTIO_MMIO_QUEUE_READY);
	}

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);

error_new_virtqueue:
	if (priv->version == 1) {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
	}

error_available:
	return ERR_PTR(err);
}

static void virtio_mmio_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_priv *priv = dev_get_priv(vq->vdev);
	unsigned int index = vq->index;

	/* Select and deactivate the queue */
	writel(index, priv->base + VIRTIO_MMIO_QUEUE_SEL);
	if (priv->version == 1) {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, priv->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(priv->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);
}

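/* Tear down every virtqueue that has been set up for this device */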
static int virtio_mmio_del_vqs(struct udevice *udev)
{
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &uc_priv->vqs, list)
		virtio_mmio_del_vq(vq);

	return 0;
}

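/*
 * Set up nvqs virtqueues in index order; if any queue fails, delete
 * everything created so far and return that queue's error code.
 */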
static int virtio_mmio_find_vqs(struct udevice *udev, unsigned int nvqs,
				struct virtqueue *vqs[])
{
	int i;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_mmio_setup_vq(udev, i);
		if (IS_ERR(vqs[i])) {
			virtio_mmio_del_vqs(udev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static int virtio_mmio_notify(struct udevice *udev, struct virtqueue *vq)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	/*
	 * We write the queue's selector into the notification register
	 * to signal the other end
	 */
	writel(vq->index, priv->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return 0;
}

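/* Read the register base address from the device tree into priv->base */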
static int virtio_mmio_ofdata_to_platdata(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);

	priv->base = (void __iomem *)(ulong)dev_read_addr(udev);
	if (priv->base == (void __iomem *)FDT_ADDR_T_NONE)
		return -EINVAL;

	return 0;
}

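/*
 * Probe the transport: check the "virt" magic value and the device version,
 * then record the device and vendor IDs for the uclass. A device ID of 0
 * marks an unused placeholder slot and ends probing without an error.
 */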
static int virtio_mmio_probe(struct udevice *udev)
{
	struct virtio_mmio_priv *priv = dev_get_priv(udev);
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	u32 magic;

	/* Check magic value */
	magic = readl(priv->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		debug("(%s): wrong magic value 0x%08x!\n", udev->name, magic);
		return 0;
	}

	/* Check device version */
	priv->version = readl(priv->base + VIRTIO_MMIO_VERSION);
	if (priv->version < 1 || priv->version > 2) {
		debug("(%s): version %d not supported!\n",
		      udev->name, priv->version);
		return 0;
	}

	/* Check device ID */
	uc_priv->device = readl(priv->base + VIRTIO_MMIO_DEVICE_ID);
	if (uc_priv->device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return 0;
	}
	uc_priv->vendor = readl(priv->base + VIRTIO_MMIO_VENDOR_ID);

	if (priv->version == 1)
		writel(PAGE_SIZE, priv->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	debug("(%s): device (%d) vendor (%08x) version (%d)\n", udev->name,
	      uc_priv->device, uc_priv->vendor, priv->version);

	return 0;
}

static const struct dm_virtio_ops virtio_mmio_ops = {
	.get_config = virtio_mmio_get_config,
	.set_config = virtio_mmio_set_config,
	.generation = virtio_mmio_generation,
	.get_status = virtio_mmio_get_status,
	.set_status = virtio_mmio_set_status,
	.reset = virtio_mmio_reset,
	.get_features = virtio_mmio_get_features,
	.set_features = virtio_mmio_set_features,
	.find_vqs = virtio_mmio_find_vqs,
	.del_vqs = virtio_mmio_del_vqs,
	.notify = virtio_mmio_notify,
};

static const struct udevice_id virtio_mmio_ids[] = {
	{ .compatible = "virtio,mmio" },
	{ }
};

U_BOOT_DRIVER(virtio_mmio) = {
	.name = "virtio-mmio",
	.id = UCLASS_VIRTIO,
	.of_match = virtio_mmio_ids,
	.ops = &virtio_mmio_ops,
	.probe = virtio_mmio_probe,
	.ofdata_to_platdata = virtio_mmio_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct virtio_mmio_priv),
};