blob: d2b0b63ecd5e5718663ecd77ee9b1940df422474 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Mugunthan V Na0594ce2016-02-15 15:31:37 +05302/*
3 * Direct Memory Access U-Class driver
4 *
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +01005 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
7 * Written by Mugunthan V N <mugunthanvnm@ti.com>
Mugunthan V Na0594ce2016-02-15 15:31:37 +05308 *
9 * Author: Mugunthan V N <mugunthanvnm@ti.com>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053010 */
11
12#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070013#include <cpu_func.h>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053014#include <dm.h>
Simon Glass336d4612020-02-03 07:36:16 -070015#include <malloc.h>
Simon Glass90526e92020-05-10 11:39:56 -060016#include <asm/cache.h>
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010017#include <dm/read.h>
Álvaro Fernández Rojas10b4dc52018-11-28 19:17:49 +010018#include <dma-uclass.h>
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010019#include <dt-structs.h>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053020#include <errno.h>
21
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010022#ifdef CONFIG_DMA_CHANNELS
23static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
24{
25 return (struct dma_ops *)dev->driver->ops;
26}
27
28# if CONFIG_IS_ENABLED(OF_CONTROL)
29static int dma_of_xlate_default(struct dma *dma,
30 struct ofnode_phandle_args *args)
31{
32 debug("%s(dma=%p)\n", __func__, dma);
33
34 if (args->args_count > 1) {
35 pr_err("Invaild args_count: %d\n", args->args_count);
36 return -EINVAL;
37 }
38
39 if (args->args_count)
40 dma->id = args->args[0];
41 else
42 dma->id = 0;
43
44 return 0;
45}
46
/**
 * dma_get_by_index() - Resolve the @index'th "dmas" entry of @dev into a
 * requested DMA channel.
 *
 * Parses the client's "dmas"/"#dma-cells" properties, binds the provider
 * udevice from the phandle's node, translates the specifier cells into @dma
 * (via the provider's of_xlate op if present, else the single-cell default),
 * and finally issues dma_request() on the provider.
 *
 * @dev:	client device containing a "dmas" property
 * @index:	index into the "dmas" phandle list
 * @dma:	output descriptor; dma->dev stays NULL if lookup fails before
 *		the provider is resolved
 * Return: 0 on success, negative errno from DT parse / device lookup /
 *	   translation / request
 */
int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	/* Mark as unresolved so callers can't use a stale provider on error */
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	/* Providers with multi-cell specifiers decode them themselves */
	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}
89
90int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
91{
92 int index;
93
94 debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
95 dma->dev = NULL;
96
97 index = dev_read_stringlist_search(dev, "dma-names", name);
98 if (index < 0) {
99 pr_err("dev_read_stringlist_search() failed: %d\n", index);
100 return index;
101 }
102
103 return dma_get_by_index(dev, index, dma);
104}
105# endif /* OF_CONTROL */
106
107int dma_request(struct udevice *dev, struct dma *dma)
108{
109 struct dma_ops *ops = dma_dev_ops(dev);
110
111 debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);
112
113 dma->dev = dev;
114
115 if (!ops->request)
116 return 0;
117
118 return ops->request(dma);
119}
120
121int dma_free(struct dma *dma)
122{
123 struct dma_ops *ops = dma_dev_ops(dma->dev);
124
125 debug("%s(dma=%p)\n", __func__, dma);
126
Simon Glassaae95882020-02-03 07:35:55 -0700127 if (!ops->rfree)
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +0100128 return 0;
129
Simon Glassaae95882020-02-03 07:35:55 -0700130 return ops->rfree(dma);
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +0100131}
132
133int dma_enable(struct dma *dma)
134{
135 struct dma_ops *ops = dma_dev_ops(dma->dev);
136
137 debug("%s(dma=%p)\n", __func__, dma);
138
139 if (!ops->enable)
140 return -ENOSYS;
141
142 return ops->enable(dma);
143}
144
145int dma_disable(struct dma *dma)
146{
147 struct dma_ops *ops = dma_dev_ops(dma->dev);
148
149 debug("%s(dma=%p)\n", __func__, dma);
150
151 if (!ops->disable)
152 return -ENOSYS;
153
154 return ops->disable(dma);
155}
156
157int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
158{
159 struct dma_ops *ops = dma_dev_ops(dma->dev);
160
161 debug("%s(dma=%p)\n", __func__, dma);
162
163 if (!ops->prepare_rcv_buf)
164 return -1;
165
166 return ops->prepare_rcv_buf(dma, dst, size);
167}
168
169int dma_receive(struct dma *dma, void **dst, void *metadata)
170{
171 struct dma_ops *ops = dma_dev_ops(dma->dev);
172
173 debug("%s(dma=%p)\n", __func__, dma);
174
175 if (!ops->receive)
176 return -ENOSYS;
177
178 return ops->receive(dma, dst, metadata);
179}
180
181int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
182{
183 struct dma_ops *ops = dma_dev_ops(dma->dev);
184
185 debug("%s(dma=%p)\n", __func__, dma);
186
187 if (!ops->send)
188 return -ENOSYS;
189
190 return ops->send(dma, src, len, metadata);
191}
Vignesh Raghavendrab8a4dd22019-12-04 22:17:20 +0530192
193int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
194{
195 struct dma_ops *ops = dma_dev_ops(dma->dev);
196
197 debug("%s(dma=%p)\n", __func__, dma);
198
199 if (!ops->get_cfg)
200 return -ENOSYS;
201
202 return ops->get_cfg(dma, cfg_id, cfg_data);
203}
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +0100204#endif /* CONFIG_DMA_CHANNELS */
205
/**
 * dma_get_device() - Find the first DMA controller advertising support for
 * @transfer_type in its uclass-private 'supported' mask.
 *
 * Walks the DMA uclass (probing each device) and stops at the first match.
 *
 * @transfer_type:	DMA_SUPPORTS_* capability bit(s) to match
 * @devp:		set to the matching device on success
 * Return: 0 (or the iterator's last non-negative ret) on success,
 *	   -EPROTONOSUPPORT if no probed device supports @transfer_type
 */
int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	/* NB: the iterator sets dev to NULL when the uclass is exhausted */
	for (ret = uclass_first_device(UCLASS_DMA, &dev); dev && !ret;
	     ret = uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_err("No DMA device found that supports %x type\n",
		       transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return ret;
}
230
231int dma_memcpy(void *dst, void *src, size_t len)
232{
233 struct udevice *dev;
234 const struct dma_ops *ops;
235 int ret;
236
237 ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
238 if (ret < 0)
239 return ret;
240
241 ops = device_get_ops(dev);
242 if (!ops->transfer)
243 return -ENOSYS;
244
245 /* Invalidate the area, so no writeback into the RAM races with DMA */
246 invalidate_dcache_range((unsigned long)dst, (unsigned long)dst +
247 roundup(len, ARCH_DMA_MINALIGN));
248
249 return ops->transfer(dev, DMA_MEM_TO_MEM, dst, src, len);
250}
251
/*
 * DMA uclass registration: honours device-tree sequence aliases and
 * auto-allocates a dma_dev_priv per device (holds the 'supported' mask).
 */
UCLASS_DRIVER(dma) = {
	.id = UCLASS_DMA,
	.name = "dma",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto_alloc_size = sizeof(struct dma_dev_priv),
};