blob: 0c1d88e10c65616318d57e59aaa2b4ad5184b4d1 [file] [log] [blame]
Tom Rini83d290c2018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Mugunthan V Na0594ce2016-02-15 15:31:37 +05302/*
3 * Direct Memory Access U-Class driver
4 *
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +01005 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
6 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
7 * Written by Mugunthan V N <mugunthanvnm@ti.com>
Mugunthan V Na0594ce2016-02-15 15:31:37 +05308 *
9 * Author: Mugunthan V N <mugunthanvnm@ti.com>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053010 */
11
Patrick Delaunayb953ec22021-04-27 11:02:19 +020012#define LOG_CATEGORY UCLASS_DMA
13
Mugunthan V Na0594ce2016-02-15 15:31:37 +053014#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -070015#include <cpu_func.h>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053016#include <dm.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060017#include <log.h>
Simon Glass336d4612020-02-03 07:36:16 -070018#include <malloc.h>
Simon Glass90526e92020-05-10 11:39:56 -060019#include <asm/cache.h>
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010020#include <dm/read.h>
Álvaro Fernández Rojas10b4dc52018-11-28 19:17:49 +010021#include <dma-uclass.h>
Andrew Davisc8d2fc72022-10-07 12:11:11 -050022#include <linux/dma-mapping.h>
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010023#include <dt-structs.h>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053024#include <errno.h>
Simon Glass1e94b462023-09-14 18:21:46 -060025#include <linux/printk.h>
Mugunthan V Na0594ce2016-02-15 15:31:37 +053026
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010027#ifdef CONFIG_DMA_CHANNELS
28static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
29{
30 return (struct dma_ops *)dev->driver->ops;
31}
32
33# if CONFIG_IS_ENABLED(OF_CONTROL)
34static int dma_of_xlate_default(struct dma *dma,
35 struct ofnode_phandle_args *args)
36{
37 debug("%s(dma=%p)\n", __func__, dma);
38
39 if (args->args_count > 1) {
Sean Anderson46ad7ce2021-12-01 14:26:53 -050040 pr_err("Invalid args_count: %d\n", args->args_count);
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +010041 return -EINVAL;
42 }
43
44 if (args->args_count)
45 dma->id = args->args[0];
46 else
47 dma->id = 0;
48
49 return 0;
50}
51
52int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
53{
54 int ret;
55 struct ofnode_phandle_args args;
56 struct udevice *dev_dma;
57 const struct dma_ops *ops;
58
59 debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);
60
61 assert(dma);
62 dma->dev = NULL;
63
64 ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
65 &args);
66 if (ret) {
67 pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
68 __func__, ret);
69 return ret;
70 }
71
72 ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
73 if (ret) {
74 pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
75 __func__, ret);
76 return ret;
77 }
78
79 dma->dev = dev_dma;
80
81 ops = dma_dev_ops(dev_dma);
82
83 if (ops->of_xlate)
84 ret = ops->of_xlate(dma, &args);
85 else
86 ret = dma_of_xlate_default(dma, &args);
87 if (ret) {
88 pr_err("of_xlate() failed: %d\n", ret);
89 return ret;
90 }
91
92 return dma_request(dev_dma, dma);
93}
94
95int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
96{
97 int index;
98
99 debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
100 dma->dev = NULL;
101
102 index = dev_read_stringlist_search(dev, "dma-names", name);
103 if (index < 0) {
104 pr_err("dev_read_stringlist_search() failed: %d\n", index);
105 return index;
106 }
107
108 return dma_get_by_index(dev, index, dma);
109}
110# endif /* OF_CONTROL */
111
112int dma_request(struct udevice *dev, struct dma *dma)
113{
114 struct dma_ops *ops = dma_dev_ops(dev);
115
116 debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);
117
118 dma->dev = dev;
119
120 if (!ops->request)
121 return 0;
122
123 return ops->request(dma);
124}
125
126int dma_free(struct dma *dma)
127{
128 struct dma_ops *ops = dma_dev_ops(dma->dev);
129
130 debug("%s(dma=%p)\n", __func__, dma);
131
Simon Glassaae95882020-02-03 07:35:55 -0700132 if (!ops->rfree)
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +0100133 return 0;
134
Simon Glassaae95882020-02-03 07:35:55 -0700135 return ops->rfree(dma);
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +0100136}
137
138int dma_enable(struct dma *dma)
139{
140 struct dma_ops *ops = dma_dev_ops(dma->dev);
141
142 debug("%s(dma=%p)\n", __func__, dma);
143
144 if (!ops->enable)
145 return -ENOSYS;
146
147 return ops->enable(dma);
148}
149
150int dma_disable(struct dma *dma)
151{
152 struct dma_ops *ops = dma_dev_ops(dma->dev);
153
154 debug("%s(dma=%p)\n", __func__, dma);
155
156 if (!ops->disable)
157 return -ENOSYS;
158
159 return ops->disable(dma);
160}
161
162int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
163{
164 struct dma_ops *ops = dma_dev_ops(dma->dev);
165
166 debug("%s(dma=%p)\n", __func__, dma);
167
168 if (!ops->prepare_rcv_buf)
169 return -1;
170
171 return ops->prepare_rcv_buf(dma, dst, size);
172}
173
174int dma_receive(struct dma *dma, void **dst, void *metadata)
175{
176 struct dma_ops *ops = dma_dev_ops(dma->dev);
177
178 debug("%s(dma=%p)\n", __func__, dma);
179
180 if (!ops->receive)
181 return -ENOSYS;
182
183 return ops->receive(dma, dst, metadata);
184}
185
186int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
187{
188 struct dma_ops *ops = dma_dev_ops(dma->dev);
189
190 debug("%s(dma=%p)\n", __func__, dma);
191
192 if (!ops->send)
193 return -ENOSYS;
194
195 return ops->send(dma, src, len, metadata);
196}
Vignesh Raghavendrab8a4dd22019-12-04 22:17:20 +0530197
198int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
199{
200 struct dma_ops *ops = dma_dev_ops(dma->dev);
201
202 debug("%s(dma=%p)\n", __func__, dma);
203
204 if (!ops->get_cfg)
205 return -ENOSYS;
206
207 return ops->get_cfg(dma, cfg_id, cfg_data);
208}
Álvaro Fernández Rojas27ab27f2018-11-28 19:17:50 +0100209#endif /* CONFIG_DMA_CHANNELS */
210
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530211int dma_get_device(u32 transfer_type, struct udevice **devp)
212{
213 struct udevice *dev;
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530214
Michal Suchanek49549372022-10-12 21:58:08 +0200215 for (uclass_first_device(UCLASS_DMA, &dev); dev;
216 uclass_next_device(&dev)) {
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530217 struct dma_dev_priv *uc_priv;
218
219 uc_priv = dev_get_uclass_priv(dev);
220 if (uc_priv->supported & transfer_type)
221 break;
222 }
223
224 if (!dev) {
Vignesh Raghavendra8995a862020-09-17 16:53:07 +0530225 pr_debug("No DMA device found that supports %x type\n",
226 transfer_type);
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530227 return -EPROTONOSUPPORT;
228 }
229
230 *devp = dev;
231
Michal Suchanek49549372022-10-12 21:58:08 +0200232 return 0;
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530233}
234
235int dma_memcpy(void *dst, void *src, size_t len)
236{
237 struct udevice *dev;
238 const struct dma_ops *ops;
Andrew Davisc8d2fc72022-10-07 12:11:11 -0500239 dma_addr_t destination;
240 dma_addr_t source;
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530241 int ret;
242
243 ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
244 if (ret < 0)
245 return ret;
246
247 ops = device_get_ops(dev);
248 if (!ops->transfer)
249 return -ENOSYS;
250
Andrew Davisc8d2fc72022-10-07 12:11:11 -0500251 /* Clean the areas, so no writeback into the RAM races with DMA */
252 destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
253 source = dma_map_single(src, len, DMA_TO_DEVICE);
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530254
Andrew Davisb9add642022-10-07 12:11:13 -0500255 ret = ops->transfer(dev, DMA_MEM_TO_MEM, destination, source, len);
Andrew Davisc8d2fc72022-10-07 12:11:11 -0500256
257 /* Clean+Invalidate the areas after, so we can see DMA'd data */
258 dma_unmap_single(destination, len, DMA_FROM_DEVICE);
259 dma_unmap_single(source, len, DMA_TO_DEVICE);
260
261 return ret;
Mugunthan V Na0594ce2016-02-15 15:31:37 +0530262}
263
/*
 * DMA uclass registration. DM_UC_FLAG_SEQ_ALIAS lets devices take their
 * sequence numbers from device tree aliases; each device gets a
 * struct dma_dev_priv allocated automatically (holds the supported
 * transfer-type mask checked by dma_get_device()).
 */
UCLASS_DRIVER(dma) = {
	.id = UCLASS_DMA,
	.name = "dma",
	.flags = DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto = sizeof(struct dma_dev_priv),
};