// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2015
 * Texas Instruments Incorporated - http://www.ti.com/
 */

#define LOG_CATEGORY UCLASS_REMOTEPROC

#define pr_fmt(fmt) "%s: " fmt, __func__
#include <common.h>
#include <elf.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <virtio_ring.h>
#include <remoteproc.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm.h>
#include <dm/uclass.h>
#include <dm/uclass-internal.h>
#include <linux/compat.h>
#include <linux/printk.h>

DECLARE_GLOBAL_DATA_PTR;

struct resource_table {
	u32 ver;
	u32 num;
	u32 reserved[2];
	u32 offset[0];
} __packed;

typedef int (*handle_resource_t) (struct udevice *, void *, int offset, int avail);

static struct resource_table *rsc_table;

/**
 * for_each_remoteproc_device() - iterate through the list of rproc devices
 * @fn: check function to call per match; if this function returns a failure,
 *	iteration is aborted with that error value
 * @skip_dev: Device to skip calling the callback for.
 * @data: Data to pass to the callback function
 *
 * Return: 0 if none of the callbacks returned a non-zero result, else the
 * result from the failing callback function
 */
static int for_each_remoteproc_device(int (*fn) (struct udevice *dev,
					struct dm_rproc_uclass_pdata *uc_pdata,
					const void *data),
				      struct udevice *skip_dev,
				      const void *data)
{
	struct udevice *dev;
	struct dm_rproc_uclass_pdata *uc_pdata;
	int ret;

	for (ret = uclass_find_first_device(UCLASS_REMOTEPROC, &dev); dev;
	     ret = uclass_find_next_device(&dev)) {
		if (ret || dev == skip_dev)
			continue;
		uc_pdata = dev_get_uclass_plat(dev);
		ret = fn(dev, uc_pdata, data);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * _rproc_name_is_unique() - iteration helper to check if rproc name is unique
 * @dev:	device that we are checking name for
 * @uc_pdata:	uclass platform data
 * @data:	compare data (this is the name we want to ensure is unique)
 *
 * Return: 0 if there is no match (name is unique); if there is a match (we
 * don't have a unique name), return -EINVAL.
 */
static int _rproc_name_is_unique(struct udevice *dev,
				 struct dm_rproc_uclass_pdata *uc_pdata,
				 const void *data)
{
	const char *check_name = data;

	/* devices not yet populated with data - so skip them */
	if (!uc_pdata->name || !check_name)
		return 0;

	/* Return 0 to search further if we don't match */
	if (strlen(uc_pdata->name) != strlen(check_name))
		return 0;

	if (!strcmp(uc_pdata->name, check_name))
		return -EINVAL;

	return 0;
}

/**
 * rproc_name_is_unique() - Check if the rproc name is unique
 * @check_dev:	Device we are attempting to ensure is unique
 * @check_name:	Name we are trying to ensure is unique.
 *
 * Return: true if we have a unique name, false if name is not unique.
 */
static bool rproc_name_is_unique(struct udevice *check_dev,
				 const char *check_name)
{
	int ret;

	ret = for_each_remoteproc_device(_rproc_name_is_unique,
					 check_dev, check_name);
	return ret ? false : true;
}

/**
 * rproc_pre_probe() - Pre probe accessor for the uclass
 * @dev:	device for which we are pre-probing
 *
 * Parses and fills up the uclass pdata for use as needed by core and
 * remote proc drivers.
 *
 * Return: 0 if all went ok, else appropriate error value.
 */
static int rproc_pre_probe(struct udevice *dev)
{
	struct dm_rproc_uclass_pdata *uc_pdata;
	const struct dm_rproc_ops *ops;

	uc_pdata = dev_get_uclass_plat(dev);

	/* See if we need to populate via fdt */

	if (!dev_get_plat(dev)) {
#if CONFIG_IS_ENABLED(OF_CONTROL)
		bool tmp;
		debug("'%s': using fdt\n", dev->name);
		uc_pdata->name = dev_read_string(dev, "remoteproc-name");

		/* Default is internal memory mapped */
		uc_pdata->mem_type = RPROC_INTERNAL_MEMORY_MAPPED;
		tmp = dev_read_bool(dev, "remoteproc-internal-memory-mapped");
		if (tmp)
			uc_pdata->mem_type = RPROC_INTERNAL_MEMORY_MAPPED;
#else
		/* Nothing much we can do about this, can we? */
		return -EINVAL;
#endif

	} else {
		struct dm_rproc_uclass_pdata *pdata = dev_get_plat(dev);

		debug("'%s': using legacy data\n", dev->name);
		if (pdata->name)
			uc_pdata->name = pdata->name;
		uc_pdata->mem_type = pdata->mem_type;
		uc_pdata->driver_plat_data = pdata->driver_plat_data;
	}

	/* Else try using device name */
	if (!uc_pdata->name)
		uc_pdata->name = dev->name;
	if (!uc_pdata->name) {
		debug("Unnamed device!");
		return -EINVAL;
	}

	if (!rproc_name_is_unique(dev, uc_pdata->name)) {
		debug("%s duplicate name '%s'\n", dev->name, uc_pdata->name);
		return -EINVAL;
	}

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (!ops->load || !ops->start) {
		debug("%s driver has missing mandatory ops?\n", dev->name);
		return -EINVAL;
	}

	return 0;
}

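/*
 * Example (illustrative sketch, not used by this file): a board without
 * OF_CONTROL can feed the "legacy data" path above by registering platform
 * data for the device.  Only dm_rproc_uclass_pdata fields referenced above
 * are shown; the device and driver names below are hypothetical.  With
 * OF_CONTROL, the equivalent information comes from the "remoteproc-name"
 * and "remoteproc-internal-memory-mapped" properties read above.
 *
 *	static struct dm_rproc_uclass_pdata demo_rproc_pdata = {
 *		.name = "demo-proc",
 *		.mem_type = RPROC_INTERNAL_MEMORY_MAPPED,
 *	};
 *
 *	U_BOOT_DRVINFO(demo_rproc) = {
 *		.name = "demo_rproc_driver",
 *		.plat = &demo_rproc_pdata,
 *	};
 */
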
/**
 * rproc_post_probe() - post probe accessor for the uclass
 * @dev:	device we finished probing
 *
 * Initiate the init function after the probe is completed. This allows
 * the remote processor drivers to split up the initializations between
 * probe and init as needed.
 *
 * Return: if the remote proc driver has an init routine, invokes it and
 * hands over the return value. Overall, 0 if all went well, else appropriate
 * error value.
 */
static int rproc_post_probe(struct udevice *dev)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (ops->init)
		return ops->init(dev);

	return 0;
}

/**
 * rproc_add_res() - After parsing the resource table, add the mappings
 * @dev:	device we finished probing
 * @mapping:	rproc_mem_entry for the resource
 *
 * Return: if the remote proc driver has an add_res routine, invokes it and
 * hands over the return value. Overall, 0 if all went well, else appropriate
 * error value.
 */
static int rproc_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
{
	const struct dm_rproc_ops *ops = rproc_get_ops(dev);

	if (!ops->add_res)
		return -ENOSYS;

	return ops->add_res(dev, mapping);
}

/**
 * rproc_alloc_mem() - After parsing the resource table, allocate memory
 * @dev:	device we finished probing
 * @len:	length of the memory to be allocated
 * @align:	alignment for the resource
 *
 * Return: if the remote proc driver has an alloc_mem routine, invokes it and
 * hands over the return value; NULL if allocation failed or no such routine
 * is provided.
 */
static void *rproc_alloc_mem(struct udevice *dev, unsigned long len,
			     unsigned long align)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return NULL;
	}

	if (ops->alloc_mem)
		return ops->alloc_mem(dev, len, align);

	return NULL;
}

/**
 * rproc_config_pagetable() - Configure page table for remote processor
 * @dev:	device we finished probing
 * @virt:	virtual address of the resource
 * @phys:	physical address of the resource
 * @len:	length of the resource
 *
 * Return: if the remote proc driver has a config_pagetable routine, invokes
 * it and hands over the return value. Overall, 0 if all went well, else
 * appropriate error value.
 */
static int rproc_config_pagetable(struct udevice *dev, unsigned int virt,
				  unsigned int phys, unsigned int len)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (ops->config_pagetable)
		return ops->config_pagetable(dev, virt, phys, len);

	return 0;
}

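/*
 * Example (sketch only): the uclass requires a driver to provide at least
 * .load and .start (checked in rproc_pre_probe()); .init, .add_res,
 * .alloc_mem and .config_pagetable are optional hooks invoked through the
 * helpers above only when present.  The driver and function names below are
 * hypothetical.
 *
 *	static const struct dm_rproc_ops demo_rproc_ops = {
 *		.load = demo_rproc_load,
 *		.start = demo_rproc_start,
 *	};
 *
 *	U_BOOT_DRIVER(demo_rproc) = {
 *		.name = "demo-rproc",
 *		.id = UCLASS_REMOTEPROC,
 *		.ops = &demo_rproc_ops,
 *	};
 */
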
UCLASS_DRIVER(rproc) = {
	.id = UCLASS_REMOTEPROC,
	.name = "remoteproc",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.pre_probe = rproc_pre_probe,
	.post_probe = rproc_post_probe,
	.per_device_plat_auto	= sizeof(struct dm_rproc_uclass_pdata),
};

/* Remoteproc subsystem access functions */
/**
 * _rproc_probe_dev() - iteration helper to probe a rproc device
 * @dev:	device to probe
 * @uc_pdata:	uclass data allocated for the device
 * @data:	unused
 *
 * Return: 0 if all ok, else appropriate error value.
 */
static int _rproc_probe_dev(struct udevice *dev,
			    struct dm_rproc_uclass_pdata *uc_pdata,
			    const void *data)
{
	int ret;

	ret = device_probe(dev);

	if (ret)
		debug("%s: Failed to initialize - %d\n", dev->name, ret);
	return ret;
}

/**
 * _rproc_dev_is_probed() - check if the device has been probed
 * @dev:	device to check
 * @uc_pdata:	unused
 * @data:	unused
 *
 * Return: -EAGAIN if not probed else return 0
 */
static int _rproc_dev_is_probed(struct udevice *dev,
				struct dm_rproc_uclass_pdata *uc_pdata,
				const void *data)
{
	if (dev_get_flags(dev) & DM_FLAG_ACTIVATED)
		return 0;

	return -EAGAIN;
}

bool rproc_is_initialized(void)
{
	int ret = for_each_remoteproc_device(_rproc_dev_is_probed, NULL, NULL);
	return ret ? false : true;
}

int rproc_init(void)
{
	int ret;

	if (rproc_is_initialized()) {
		debug("Already initialized\n");
		return -EINVAL;
	}

	ret = for_each_remoteproc_device(_rproc_probe_dev, NULL, NULL);
	return ret;
}

int rproc_dev_init(int id)
{
	struct udevice *dev = NULL;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
	if (ret) {
		debug("Unknown remote processor id '%d' requested(%d)\n",
		      id, ret);
		return ret;
	}

	ret = device_probe(dev);
	if (ret)
		debug("%s: Failed to initialize - %d\n", dev->name, ret);

	return ret;
}

int rproc_load(int id, ulong addr, ulong size)
{
	struct udevice *dev = NULL;
	struct dm_rproc_uclass_pdata *uc_pdata;
	const struct dm_rproc_ops *ops;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
	if (ret) {
		debug("Unknown remote processor id '%d' requested(%d)\n",
		      id, ret);
		return ret;
	}

	uc_pdata = dev_get_uclass_plat(dev);

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	debug("Loading to '%s' from address 0x%08lX size of %lu bytes\n",
	      uc_pdata->name, addr, size);
	if (ops->load)
		return ops->load(dev, addr, size);

	debug("%s: data corruption?? mandatory function is missing!\n",
	      dev->name);

	return -EINVAL;
};

/*
 * Completely internal helper enums..
 * Keeping this isolated helps this code evolve independent of other
 * parts..
 */
enum rproc_ops {
	RPROC_START,
	RPROC_STOP,
	RPROC_RESET,
	RPROC_PING,
	RPROC_RUNNING,
};

/**
 * _rproc_ops_wrapper() - wrapper for invoking remote proc driver callback
 * @id:	id of the remote processor
 * @op:	one of rproc_ops that indicates what operation to invoke
 *
 * Most of the checks and verification for remoteproc operations are more
 * or less the same for almost all operations. This allows us to put a wrapper
 * and use the common checks to allow the driver to function appropriately.
 *
 * Return: 0 if all ok, else appropriate error value.
 */
static int _rproc_ops_wrapper(int id, enum rproc_ops op)
{
	struct udevice *dev = NULL;
	struct dm_rproc_uclass_pdata *uc_pdata;
	const struct dm_rproc_ops *ops;
	int (*fn)(struct udevice *dev);
	bool mandatory = false;
	char *op_str;
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_REMOTEPROC, id, &dev);
	if (ret) {
		debug("Unknown remote processor id '%d' requested(%d)\n",
		      id, ret);
		return ret;
	}

	uc_pdata = dev_get_uclass_plat(dev);

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}
	switch (op) {
	case RPROC_START:
		fn = ops->start;
		mandatory = true;
		op_str = "Starting";
		break;
	case RPROC_STOP:
		fn = ops->stop;
		op_str = "Stopping";
		break;
	case RPROC_RESET:
		fn = ops->reset;
		op_str = "Resetting";
		break;
	case RPROC_RUNNING:
		fn = ops->is_running;
		op_str = "Checking if running:";
		break;
	case RPROC_PING:
		fn = ops->ping;
		op_str = "Pinging";
		break;
	default:
		debug("what is '%d' operation??\n", op);
		return -EINVAL;
	}

	debug("%s %s...\n", op_str, uc_pdata->name);
	if (fn)
		return fn(dev);

	if (mandatory)
		debug("%s: data corruption?? mandatory function is missing!\n",
		      dev->name);

	return -ENOSYS;
}

int rproc_start(int id)
{
	return _rproc_ops_wrapper(id, RPROC_START);
};

int rproc_stop(int id)
{
	return _rproc_ops_wrapper(id, RPROC_STOP);
};

int rproc_reset(int id)
{
	return _rproc_ops_wrapper(id, RPROC_RESET);
};

int rproc_ping(int id)
{
	return _rproc_ops_wrapper(id, RPROC_PING);
};

int rproc_is_running(int id)
{
	return _rproc_ops_wrapper(id, RPROC_RUNNING);
};

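/*
 * Typical boot-time usage of the accessors above (sketch only; error
 * handling trimmed, the remote processor is assumed to be at sequence
 * number 0 and its firmware image already placed at fw_addr/fw_size by the
 * caller).  The 'rproc' shell command is built on the same calls.
 *
 *	rproc_init();				// probe all rproc devices
 *	rproc_load(0, fw_addr, fw_size);	// driver ->load() callback
 *	rproc_start(0);				// driver ->start() callback
 */
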
static int handle_trace(struct udevice *dev, struct fw_rsc_trace *rsc,
			int offset, int avail)
{
	if (sizeof(*rsc) > avail) {
		debug("trace rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("trace rsc: da 0x%x, len 0x%x\n", rsc->da, rsc->len);

	return 0;
}

static int handle_devmem(struct udevice *dev, struct fw_rsc_devmem *rsc,
			 int offset, int avail)
{
	struct rproc_mem_entry *mapping;

	if (sizeof(*rsc) > avail) {
		debug("devmem rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("devmem rsc: pa 0x%x, da 0x%x, len 0x%x\n",
	      rsc->pa, rsc->da, rsc->len);

	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	rproc_add_res(dev, mapping);

	debug("mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
	      rsc->pa, rsc->da, rsc->len);

	return 0;
}

static int handle_carveout(struct udevice *dev, struct fw_rsc_carveout *rsc,
			   int offset, int avail)
{
	struct rproc_mem_entry *mapping;

	if (sizeof(*rsc) > avail) {
		debug("carveout rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("carveout rsc: da %x, pa %x, len %x, flags %x\n",
	      rsc->da, rsc->pa, rsc->len, rsc->flags);

	rsc->pa = (uintptr_t)rproc_alloc_mem(dev, rsc->len, 8);
	if (!rsc->pa) {
		debug
		    ("failed to allocate carveout rsc: da %x, pa %x, len %x, flags %x\n",
		     rsc->da, rsc->pa, rsc->len, rsc->flags);
		return -ENOMEM;
	}
	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * We'll need this info later when we'll want to unmap
	 * everything (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the
	 * resource table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	rproc_add_res(dev, mapping);

	debug("carveout mapped 0x%x to 0x%x\n", rsc->da, rsc->pa);

	return 0;
}

#define RPROC_PAGE_SHIFT 12
#define RPROC_PAGE_SIZE BIT(RPROC_PAGE_SHIFT)
#define RPROC_PAGE_ALIGN(x) (((x) + (RPROC_PAGE_SIZE - 1)) & ~(RPROC_PAGE_SIZE - 1))

static int alloc_vring(struct udevice *dev, struct fw_rsc_vdev *rsc, int i)
{
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	int size;
	int order;
	void *pa;

	debug("vdev rsc: vring%d: da %x, qsz %d, align %d\n",
	      i, vring->da, vring->num, vring->align);

	/*
	 * verify queue size and vring alignment are sane
	 */
	if (!vring->num || !vring->align) {
		debug("invalid qsz (%d) or alignment (%d)\n", vring->num,
		      vring->align);
		return -EINVAL;
	}

	/*
	 * actual size of vring (in bytes)
	 */
	size = RPROC_PAGE_ALIGN(vring_size(vring->num, vring->align));
	order = vring->align >> RPROC_PAGE_SHIFT;

	pa = rproc_alloc_mem(dev, size, order);
	if (!pa) {
		debug("failed to allocate vring rsc\n");
		return -ENOMEM;
	}
	debug("alloc_mem(%#x, %d): %p\n", size, order, pa);
	vring->da = (uintptr_t)pa;

	return 0;
}

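/*
 * Worked example for the size calculation above (numbers assume the
 * standard legacy virtio ring layout used by vring_size(): 16-byte
 * descriptors, 2-byte avail ring entries, 8-byte used elements):
 *
 *	num = 256, align = 4096
 *	descriptors + avail ring: 16 * 256 + 2 * (3 + 256) = 4614 -> 8192
 *	used ring:                2 * 3 + 8 * 256          = 2054
 *	vring_size() = 8192 + 2054 = 10246, RPROC_PAGE_ALIGN() -> 12288
 *	order = 4096 >> RPROC_PAGE_SHIFT = 1
 */
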
static int handle_vdev(struct udevice *dev, struct fw_rsc_vdev *rsc,
		       int offset, int avail)
{
	int i, ret;
	void *pa;

	/*
	 * make sure resource isn't truncated
	 */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
	    + rsc->config_len > avail) {
		debug("vdev rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		debug("vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
	      rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/*
	 * we currently support only two vrings per rvdev
	 */
	if (rsc->num_of_vrings > 2) {
		debug("too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	/*
	 * allocate the vrings
	 */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = alloc_vring(dev, rsc, i);
		if (ret)
			goto alloc_error;
	}

	pa = rproc_alloc_mem(dev, RPMSG_TOTAL_BUF_SPACE, 6);
	if (!pa) {
		debug("failed to allocate vdev rsc\n");
		return -ENOMEM;
	}
	debug("vring buffer alloc_mem(%#x, 6): %p\n", RPMSG_TOTAL_BUF_SPACE,
	      pa);

	return 0;

 alloc_error:
	return ret;
}

/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */
static handle_resource_t loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (handle_resource_t)handle_carveout,
	[RSC_DEVMEM] = (handle_resource_t)handle_devmem,
	[RSC_TRACE] = (handle_resource_t)handle_trace,
	[RSC_VDEV] = (handle_resource_t)handle_vdev,
};

/*
 * handle firmware resource entries before booting the remote processor
 */
static int handle_resources(struct udevice *dev, int len,
			    handle_resource_t handlers[RSC_LAST])
{
	handle_resource_t handler;
	int ret = 0, i;

	for (i = 0; i < rsc_table->num; i++) {
		int offset = rsc_table->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rsc_table + offset;
		int avail = len - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/*
		 * make sure table isn't truncated
		 */
		if (avail < 0) {
			debug("rsc table is truncated\n");
			return -EINVAL;
		}

		debug("rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_LAST) {
			debug("unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(dev, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}

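/*
 * Layout sketch of the table walked above (entry contents are examples
 * only; the actual offsets and entry types come from the firmware):
 *
 *	struct resource_table hdr:  ver = 1, num = 2, reserved = {0, 0}
 *	offset[0] ----------------> struct fw_rsc_hdr { type = RSC_CARVEOUT }
 *	                            followed by struct fw_rsc_carveout
 *	offset[1] ----------------> struct fw_rsc_hdr { type = RSC_VDEV }
 *	                            followed by struct fw_rsc_vdev + vrings
 *
 * Each offset[i] is relative to the start of the resource table, and each
 * handler receives a pointer just past its fw_rsc_hdr.
 */
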
static int
handle_intmem_to_l3_mapping(struct udevice *dev,
			    struct rproc_intmem_to_l3_mapping *l3_mapping)
{
	u32 i = 0;

	for (i = 0; i < l3_mapping->num_entries; i++) {
		struct l3_map *curr_map = &l3_mapping->mappings[i];
		struct rproc_mem_entry *mapping;

		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping)
			return -ENOMEM;

		mapping->dma = curr_map->l3_addr;
		mapping->da = curr_map->priv_addr;
		mapping->len = curr_map->len;
		rproc_add_res(dev, mapping);
	}

	return 0;
}

static Elf32_Shdr *rproc_find_table(unsigned int addr)
{
	Elf32_Ehdr *ehdr; /* Elf header structure pointer */
	Elf32_Shdr *shdr; /* Section header structure pointer */
	Elf32_Shdr sectionheader;
	int i;
	u8 *elf_data;
	char *name_table;
	struct resource_table *ptable;

	ehdr = (Elf32_Ehdr *)(uintptr_t)addr;
	elf_data = (u8 *)ehdr;
	shdr = (Elf32_Shdr *)(elf_data + ehdr->e_shoff);
	memcpy(&sectionheader, &shdr[ehdr->e_shstrndx], sizeof(sectionheader));
	name_table = (char *)(elf_data + sectionheader.sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		memcpy(&sectionheader, shdr, sizeof(sectionheader));
		u32 size = sectionheader.sh_size;
		u32 offset = sectionheader.sh_offset;

		if (strcmp
		    (name_table + sectionheader.sh_name, ".resource_table"))
			continue;

		ptable = (struct resource_table *)(elf_data + offset);

		/*
		 * make sure table has at least the header
		 */
		if (sizeof(struct resource_table) > size) {
			debug("header-less resource table\n");
			return NULL;
		}

		/*
		 * we don't support any version beyond the first
		 */
		if (ptable->ver != 1) {
			debug("unsupported fw ver: %d\n", ptable->ver);
			return NULL;
		}

		/*
		 * make sure reserved bytes are zeroes
		 */
		if (ptable->reserved[0] || ptable->reserved[1]) {
			debug("non zero reserved bytes\n");
			return NULL;
		}

		/*
		 * make sure the offsets array isn't truncated
		 */
		if (ptable->num * sizeof(ptable->offset[0]) +
		    sizeof(struct resource_table) > size) {
			debug("resource table incomplete\n");
			return NULL;
		}

		return shdr;
	}

	return NULL;
}

struct resource_table *rproc_find_resource_table(struct udevice *dev,
						 unsigned int addr,
						 int *tablesz)
{
	Elf32_Shdr *shdr;
	Elf32_Shdr sectionheader;
	struct resource_table *ptable;
	u8 *elf_data = (u8 *)(uintptr_t)addr;

	shdr = rproc_find_table(addr);
	if (!shdr) {
		debug("%s: failed to get resource section header\n", __func__);
		return NULL;
	}

	memcpy(&sectionheader, shdr, sizeof(sectionheader));
	ptable = (struct resource_table *)(elf_data + sectionheader.sh_offset);
	if (tablesz)
		*tablesz = sectionheader.sh_size;

	return ptable;
}

unsigned long rproc_parse_resource_table(struct udevice *dev, struct rproc *cfg)
{
	struct resource_table *ptable = NULL;
	int tablesz;
	int ret;
	unsigned long addr;

	addr = cfg->load_addr;

	ptable = rproc_find_resource_table(dev, addr, &tablesz);
	if (!ptable) {
		debug("%s : failed to find resource table\n", __func__);
		return 0;
	}

	debug("%s : found resource table\n", __func__);
	rsc_table = kzalloc(tablesz, GFP_KERNEL);
	if (!rsc_table) {
		debug("resource table alloc failed!\n");
		return 0;
	}

	/*
	 * Copy the resource table into a local buffer before handling the
	 * resource table.
	 */
	memcpy(rsc_table, ptable, tablesz);
	if (cfg->intmem_to_l3_mapping)
		handle_intmem_to_l3_mapping(dev, cfg->intmem_to_l3_mapping);
	ret = handle_resources(dev, tablesz, loading_handlers);
	if (ret) {
		debug("handle_resources failed: %d\n", ret);
		return 0;
	}

	/*
	 * Instead of trying to mimic the kernel flow of copying the
	 * processed resource table into its post-ELF-load location in DDR,
	 * copy it back into its original location.
	 */
	memcpy(ptable, rsc_table, tablesz);
	free(rsc_table);
	rsc_table = NULL;

	return 1;
}
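
/*
 * Usage sketch (illustrative; names other than the struct rproc fields used
 * above are hypothetical): a platform driver that has loaded its ELF image
 * typically calls this helper before starting the core, treating the return
 * value as a boolean.
 *
 *	struct rproc cfg = {
 *		.load_addr = fw_addr,
 *		.intmem_to_l3_mapping = NULL,
 *	};
 *
 *	if (!rproc_parse_resource_table(dev, &cfg))
 *		debug("no usable resource table\n");
 */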