remoteproc: uclass: Add remoteproc resource handling helpers

Add remoteproc resource handling helpers. These functions parse the
resource table and handle the individual resource types: carveout,
devmem, trace and vring (vdev) resources are supported.
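
With these helpers in place, a platform driver opts in by providing the
corresponding callbacks (add_res, alloc_mem, config_pagetable) in its
dm_rproc_ops. A minimal, hypothetical sketch follows; the driver names
and the memalign()-based allocator are illustrative only and not part
of this patch:

    #include <malloc.h>
    #include <remoteproc.h>

    static void *my_rproc_alloc_mem(struct udevice *dev,
                                    unsigned long len, unsigned long align)
    {
            /* hand out suitably aligned memory, e.g. from a local pool */
            return memalign(align, len);
    }

    static int my_rproc_add_res(struct udevice *dev,
                                struct rproc_mem_entry *mapping)
    {
            /* record the mapping so it can be torn down at shutdown */
            return 0;
    }

    static const struct dm_rproc_ops my_rproc_ops = {
            /* .load, .start, etc. as usual for the platform */
            .alloc_mem = my_rproc_alloc_mem,
            .add_res = my_rproc_add_res,
    };

When a callback is missing, rproc_config_pagetable() is a no-op,
rproc_add_res() returns -ENOSYS, and rproc_alloc_mem() returns NULL
(which fails carveout and vring allocation).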

Signed-off-by: Keerthy <j-keerthy@ti.com>
[Amjad: fix redefinition of "struct resource_table" and compile warnings]
Signed-off-by: Amjad Ouled-Ameur <aouledameur@baylibre.com>
diff --git a/drivers/remoteproc/rproc-uclass.c b/drivers/remoteproc/rproc-uclass.c
index 87e1ec7..50bcc90 100644
--- a/drivers/remoteproc/rproc-uclass.c
+++ b/drivers/remoteproc/rproc-uclass.c
@@ -8,15 +8,31 @@
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 #include <common.h>
+#include <elf.h>
 #include <errno.h>
 #include <log.h>
 #include <malloc.h>
+#include <virtio_ring.h>
 #include <remoteproc.h>
 #include <asm/io.h>
 #include <dm/device-internal.h>
 #include <dm.h>
 #include <dm/uclass.h>
 #include <dm/uclass-internal.h>
+#include <linux/compat.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
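+/* Firmware resource table header, as defined by the remoteproc interface */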
+struct resource_table {
+	u32 ver;
+	u32 num;
+	u32 reserved[2];
+	u32 offset[0];
+} __packed;
+
+typedef int (*handle_resource_t)(struct udevice *, void *, int offset,
+				 int avail);
+
+static struct resource_table *rsc_table;
 
 /**
  * for_each_remoteproc_device() - iterate through the list of rproc devices
@@ -196,6 +212,80 @@
 	return 0;
 }
 
+/**
+ * rproc_add_res() - Add a mapping found while parsing the resource table
+ * @dev:	device we finished probing
+ * @mapping: rproc_mem_entry for the resource
+ *
+ * Return: if the remote proc driver has an add_res routine, invokes it and
+ * hands over its return value: 0 if all went well, else an appropriate
+ * error value. -ENOSYS is returned when the routine is not implemented.
+ */
+static int rproc_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
+{
+	const struct dm_rproc_ops *ops = rproc_get_ops(dev);
+
+	if (!ops->add_res)
+		return -ENOSYS;
+
+	return ops->add_res(dev, mapping);
+}
+
+/**
+ * rproc_alloc_mem() - Allocate memory needed by a resource
+ * @dev:	device we finished probing
+ * @len: length of the memory to allocate (in bytes)
+ * @align: alignment for the resource
+ *
+ * Return: if the remote proc driver has an alloc_mem routine, invokes it and
+ * hands over its return value: a pointer to the allocated memory on success.
+ * NULL is returned when the routine is missing or the allocation fails.
+ */
+static void *rproc_alloc_mem(struct udevice *dev, unsigned long len,
+			     unsigned long align)
+{
+	const struct dm_rproc_ops *ops;
+
+	ops = rproc_get_ops(dev);
+	if (!ops) {
+		debug("%s driver has no ops?\n", dev->name);
+		return NULL;
+	}
+
+	if (ops->alloc_mem)
+		return ops->alloc_mem(dev, len, align);
+
+	return NULL;
+}
+
+/**
+ * rproc_config_pagetable() - Configure page table for remote processor
+ * @dev:	device we finished probing
+ * @virt: Virtual address of the resource
+ * @phys: Physical address of the resource
+ * @len: length of the resource
+ *
+ * Return: if the remote proc driver has a config_pagetable routine, invokes
+ * it and hands over its return value: 0 if all went well, else an appropriate
+ * error value. 0 is returned when the routine is not implemented.
+ */
+static int rproc_config_pagetable(struct udevice *dev, unsigned int virt,
+				  unsigned int phys, unsigned int len)
+{
+	const struct dm_rproc_ops *ops;
+
+	ops = rproc_get_ops(dev);
+	if (!ops) {
+		debug("%s driver has no ops?\n", dev->name);
+		return -EINVAL;
+	}
+
+	if (ops->config_pagetable)
+		return ops->config_pagetable(dev, virt, phys, len);
+
+	return 0;
+}
+
 UCLASS_DRIVER(rproc) = {
 	.id = UCLASS_REMOTEPROC,
 	.name = "remoteproc",
@@ -426,3 +516,447 @@
 {
 	return _rproc_ops_wrapper(id, RPROC_RUNNING);
 };
+
+
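+/*
+ * Trace resources are only sanity-checked here; U-Boot does not set up
+ * the trace buffer itself.
+ */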
+static int handle_trace(struct udevice *dev, struct fw_rsc_trace *rsc,
+			int offset, int avail)
+{
+	if (sizeof(*rsc) > avail) {
+		debug("trace rsc is truncated\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * make sure reserved bytes are zeroes
+	 */
+	if (rsc->reserved) {
+		debug("trace rsc has non zero reserved bytes\n");
+		return -EINVAL;
+	}
+
+	debug("trace rsc: da 0x%x, len 0x%x\n", rsc->da, rsc->len);
+
+	return 0;
+}
+
+static int handle_devmem(struct udevice *dev, struct fw_rsc_devmem *rsc,
+			 int offset, int avail)
+{
+	struct rproc_mem_entry *mapping;
+
+	if (sizeof(*rsc) > avail) {
+		debug("devmem rsc is truncated\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * make sure reserved bytes are zeroes
+	 */
+	if (rsc->reserved) {
+		debug("devmem rsc has non zero reserved bytes\n");
+		return -EINVAL;
+	}
+
+	debug("devmem rsc: pa 0x%x, da 0x%x, len 0x%x\n",
+	      rsc->pa, rsc->da, rsc->len);
+
+	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);
+
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	/*
+	 * We'll need this info later when we'll want to unmap everything
+	 * (e.g. on shutdown).
+	 *
+	 * We can't trust the remote processor not to change the resource
+	 * table, so we must maintain this info independently.
+	 */
+	mapping->dma = rsc->pa;
+	mapping->da = rsc->da;
+	mapping->len = rsc->len;
+	rproc_add_res(dev, mapping);
+
+	debug("mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
+	      rsc->pa, rsc->da, rsc->len);
+
+	return 0;
+}
+
+static int handle_carveout(struct udevice *dev, struct fw_rsc_carveout *rsc,
+			   int offset, int avail)
+{
+	struct rproc_mem_entry *mapping;
+
+	if (sizeof(*rsc) > avail) {
+		debug("carveout rsc is truncated\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * make sure reserved bytes are zeroes
+	 */
+	if (rsc->reserved) {
+		debug("carveout rsc has non zero reserved bytes\n");
+		return -EINVAL;
+	}
+
+	debug("carveout rsc: da %x, pa %x, len %x, flags %x\n",
+	      rsc->da, rsc->pa, rsc->len, rsc->flags);
+
+	rsc->pa = (uintptr_t)rproc_alloc_mem(dev, rsc->len, 8);
+	if (!rsc->pa) {
+		debug("failed to allocate carveout rsc: da %x, pa %x, len %x, flags %x\n",
+		      rsc->da, rsc->pa, rsc->len, rsc->flags);
+		return -ENOMEM;
+	}
+	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);
+
+	/*
+	 * Ok, this is non-standard.
+	 *
+	 * Sometimes we can't rely on the generic iommu-based DMA API
+	 * to dynamically allocate the device address and then set the IOMMU
+	 * tables accordingly, because some remote processors might
+	 * _require_ us to use hard coded device addresses that their
+	 * firmware was compiled with.
+	 *
+	 * In this case, we must use the IOMMU API directly and map
+	 * the memory to the device address as expected by the remote
+	 * processor.
+	 *
+	 * Obviously such remote processor devices should not be configured
+	 * to use the iommu-based DMA API: we expect 'dma' to contain the
+	 * physical address in this case.
+	 */
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	/*
+	 * We'll need this info later when we'll want to unmap
+	 * everything (e.g. on shutdown).
+	 *
+	 * We can't trust the remote processor not to change the
+	 * resource table, so we must maintain this info independently.
+	 */
+	mapping->dma = rsc->pa;
+	mapping->da = rsc->da;
+	mapping->len = rsc->len;
+	rproc_add_res(dev, mapping);
+
+	debug("carveout mapped 0x%x to 0x%x\n", rsc->da, rsc->pa);
+
+	return 0;
+}
+
+#define RPROC_PAGE_SHIFT 12
+#define RPROC_PAGE_SIZE  BIT(RPROC_PAGE_SHIFT)
+#define RPROC_PAGE_ALIGN(x) (((x) + (RPROC_PAGE_SIZE - 1)) & ~(RPROC_PAGE_SIZE - 1))
+
+static int alloc_vring(struct udevice *dev, struct fw_rsc_vdev *rsc, int i)
+{
+	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
+	int size;
+	int order;
+	void *pa;
+
+	debug("vdev rsc: vring%d: da %x, qsz %d, align %d\n",
+	      i, vring->da, vring->num, vring->align);
+
+	/*
+	 * verify queue size and vring alignment are sane
+	 */
+	if (!vring->num || !vring->align) {
+		debug("invalid qsz (%d) or alignment (%d)\n", vring->num,
+		      vring->align);
+		return -EINVAL;
+	}
+
+	/*
+	 * actual size of vring (in bytes)
+	 */
+	size = RPROC_PAGE_ALIGN(vring_size(vring->num, vring->align));
+	order = vring->align >> RPROC_PAGE_SHIFT;
+
+	pa = rproc_alloc_mem(dev, size, order);
+	if (!pa) {
+		debug("failed to allocate vring rsc\n");
+		return -ENOMEM;
+	}
+	debug("alloc_mem(%#x, %d): %p\n", size, order, pa);
+	vring->da = (uintptr_t)pa;
+
+	return 0;
+}
+
+static int handle_vdev(struct udevice *dev, struct fw_rsc_vdev *rsc,
+		       int offset, int avail)
+{
+	int i, ret;
+	void *pa;
+
+	/*
+	 * make sure resource isn't truncated
+	 */
+	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
+	    + rsc->config_len > avail) {
+		debug("vdev rsc is truncated\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * make sure reserved bytes are zeroes
+	 */
+	if (rsc->reserved[0] || rsc->reserved[1]) {
+		debug("vdev rsc has non zero reserved bytes\n");
+		return -EINVAL;
+	}
+
+	debug("vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
+	      rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);
+
+	/*
+	 * we currently support only two vrings per rvdev
+	 */
+	if (rsc->num_of_vrings > 2) {
+		debug("too many vrings: %d\n", rsc->num_of_vrings);
+		return -EINVAL;
+	}
+
+	/*
+	 * allocate the vrings
+	 */
+	for (i = 0; i < rsc->num_of_vrings; i++) {
+		ret = alloc_vring(dev, rsc, i);
+		if (ret)
+			goto alloc_error;
+	}
+
+	pa = rproc_alloc_mem(dev, RPMSG_TOTAL_BUF_SPACE, 6);
+	if (!pa) {
+		debug("failed to allocate vdev rsc\n");
+		return -ENOMEM;
+	}
+	debug("vring buffer alloc_mem(%#x, 6): %p\n", RPMSG_TOTAL_BUF_SPACE,
+	      pa);
+
+	return 0;
+
+ alloc_error:
+	return ret;
+}
+
+/*
+ * A lookup table for resource handlers. The indices are defined in
+ * enum fw_resource_type.
+ */
+static handle_resource_t loading_handlers[RSC_LAST] = {
+	[RSC_CARVEOUT] = (handle_resource_t)handle_carveout,
+	[RSC_DEVMEM] = (handle_resource_t)handle_devmem,
+	[RSC_TRACE] = (handle_resource_t)handle_trace,
+	[RSC_VDEV] = (handle_resource_t)handle_vdev,
+};
+
+/*
+ * handle firmware resource entries before booting the remote processor
+ */
+static int handle_resources(struct udevice *dev, int len,
+			    handle_resource_t handlers[RSC_LAST])
+{
+	handle_resource_t handler;
+	int ret = 0, i;
+
+	for (i = 0; i < rsc_table->num; i++) {
+		int offset = rsc_table->offset[i];
+		struct fw_rsc_hdr *hdr = (void *)rsc_table + offset;
+		int avail = len - offset - sizeof(*hdr);
+		void *rsc = (void *)hdr + sizeof(*hdr);
+
+		/*
+		 * make sure table isn't truncated
+		 */
+		if (avail < 0) {
+			debug("rsc table is truncated\n");
+			return -EINVAL;
+		}
+
+		debug("rsc: type %d\n", hdr->type);
+
+		if (hdr->type >= RSC_LAST) {
+			debug("unsupported resource %d\n", hdr->type);
+			continue;
+		}
+
+		handler = handlers[hdr->type];
+		if (!handler)
+			continue;
+
+		ret = handler(dev, rsc, offset + sizeof(*hdr), avail);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
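+/*
+ * Register the platform-provided internal-memory to L3 address mappings
+ * with the remote proc driver.
+ */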
+static int
+handle_intmem_to_l3_mapping(struct udevice *dev,
+			    struct rproc_intmem_to_l3_mapping *l3_mapping)
+{
+	u32 i = 0;
+
+	for (i = 0; i < l3_mapping->num_entries; i++) {
+		struct l3_map *curr_map = &l3_mapping->mappings[i];
+		struct rproc_mem_entry *mapping;
+
+		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+		if (!mapping)
+			return -ENOMEM;
+
+		mapping->dma = curr_map->l3_addr;
+		mapping->da = curr_map->priv_addr;
+		mapping->len = curr_map->len;
+		rproc_add_res(dev, mapping);
+	}
+
+	return 0;
+}
+
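+/*
+ * Locate the ".resource_table" section header in the ELF image at @addr
+ * and sanity-check the table it describes. Returns the section header on
+ * success, NULL otherwise.
+ */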
+static Elf32_Shdr *rproc_find_table(unsigned int addr)
+{
+	Elf32_Ehdr *ehdr;	/* Elf header structure pointer */
+	Elf32_Shdr *shdr;	/* Section header structure pointer */
+	Elf32_Shdr sectionheader;
+	int i;
+	u8 *elf_data;
+	char *name_table;
+	struct resource_table *ptable;
+
+	ehdr = (Elf32_Ehdr *)(uintptr_t)addr;
+	elf_data = (u8 *)ehdr;
+	shdr = (Elf32_Shdr *)(elf_data + ehdr->e_shoff);
+	memcpy(&sectionheader, &shdr[ehdr->e_shstrndx], sizeof(sectionheader));
+	name_table = (char *)(elf_data + sectionheader.sh_offset);
+
+	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
+		memcpy(&sectionheader, shdr, sizeof(sectionheader));
+		u32 size = sectionheader.sh_size;
+		u32 offset = sectionheader.sh_offset;
+
+		if (strcmp(name_table + sectionheader.sh_name,
+			   ".resource_table"))
+			continue;
+
+		ptable = (struct resource_table *)(elf_data + offset);
+
+		/*
+		 * make sure table has at least the header
+		 */
+		if (sizeof(struct resource_table) > size) {
+			debug("header-less resource table\n");
+			return NULL;
+		}
+
+		/*
+		 * we don't support any version beyond the first
+		 */
+		if (ptable->ver != 1) {
+			debug("unsupported fw ver: %d\n", ptable->ver);
+			return NULL;
+		}
+
+		/*
+		 * make sure reserved bytes are zeroes
+		 */
+		if (ptable->reserved[0] || ptable->reserved[1]) {
+			debug("non zero reserved bytes\n");
+			return NULL;
+		}
+
+		/*
+		 * make sure the offsets array isn't truncated
+		 */
+		if (ptable->num * sizeof(ptable->offset[0]) +
+		    sizeof(struct resource_table) > size) {
+			debug("resource table incomplete\n");
+			return NULL;
+		}
+
+		return shdr;
+	}
+
+	return NULL;
+}
+
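+/**
+ * rproc_find_resource_table() - find the resource table in an ELF image
+ * @dev:	remote processor device (not used by this helper)
+ * @addr:	load address of the ELF firmware image
+ * @tablesz:	if non-NULL, returns the size of the resource table section
+ *
+ * Return: pointer to the resource table inside the image, or NULL if no
+ * valid ".resource_table" section was found.
+ */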
+struct resource_table *rproc_find_resource_table(struct udevice *dev,
+						 unsigned int addr,
+						 int *tablesz)
+{
+	Elf32_Shdr *shdr;
+	Elf32_Shdr sectionheader;
+	struct resource_table *ptable;
+	u8 *elf_data = (u8 *)(uintptr_t)addr;
+
+	shdr = rproc_find_table(addr);
+	if (!shdr) {
+		debug("%s: failed to get resource section header\n", __func__);
+		return NULL;
+	}
+
+	memcpy(&sectionheader, shdr, sizeof(sectionheader));
+	ptable = (struct resource_table *)(elf_data + sectionheader.sh_offset);
+	if (tablesz)
+		*tablesz = sectionheader.sh_size;
+
+	return ptable;
+}
+
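+/**
+ * rproc_parse_resource_table() - find and handle the firmware resource table
+ * @dev:	remote processor device
+ * @cfg:	rproc configuration; cfg->load_addr must point at the ELF image
+ *
+ * Return: 1 if the resource table was found and all resources were handled
+ * successfully, 0 otherwise.
+ */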
+unsigned long rproc_parse_resource_table(struct udevice *dev, struct rproc *cfg)
+{
+	struct resource_table *ptable = NULL;
+	int tablesz;
+	int ret;
+	unsigned long addr;
+
+	addr = cfg->load_addr;
+
+	ptable = rproc_find_resource_table(dev, addr, &tablesz);
+	if (!ptable) {
+		debug("%s : failed to find resource table\n", __func__);
+		return 0;
+	}
+
+	debug("%s : found resource table\n", __func__);
+	rsc_table = kzalloc(tablesz, GFP_KERNEL);
+	if (!rsc_table) {
+		debug("resource table alloc failed!\n");
+		return 0;
+	}
+
+	/*
+	 * Copy the resource table into a local buffer before handling the
+	 * resource table.
+	 */
+	memcpy(rsc_table, ptable, tablesz);
+	if (cfg->intmem_to_l3_mapping)
+		handle_intmem_to_l3_mapping(dev, cfg->intmem_to_l3_mapping);
+	ret = handle_resources(dev, tablesz, loading_handlers);
+	if (ret) {
+		debug("handle_resources failed: %d\n", ret);
+		free(rsc_table);
+		rsc_table = NULL;
+		return 0;
+	}
+
+	/*
+	 * Instead of trying to mimic the kernel flow of copying the
+	 * processed resource table into its post-ELF-load location in
+	 * DDR, copy it back into its original location.
+	 */
+	memcpy(ptable, rsc_table, tablesz);
+	free(rsc_table);
+	rsc_table = NULL;
+
+	return 1;
+}