// SPDX-License-Identifier: GPL-2.0+
/*
 * Enhanced Direct Memory Access (EDMA3) Controller
 *
 * (C) Copyright 2014
 * Texas Instruments Incorporated, <www.ti.com>
 *
 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
 */

#include <asm/io.h>
#include <common.h>
#include <dm.h>
#include <dma.h>
#include <asm/omap_common.h>
#include <asm/ti-common/ti-edma3.h>

#define EDMA3_SL_BASE(slot)		(0x4000 + ((slot) << 5))
#define EDMA3_SL_MAX_NUM		512
#define EDMA3_SLOPT_FIFO_WIDTH_MASK	(0x7 << 8)

#define EDMA3_QCHMAP(ch)		(0x0200 + ((ch) << 2))
#define EDMA3_CHMAP_PARSET_MASK		0x1ff
#define EDMA3_CHMAP_PARSET_SHIFT	0x5
#define EDMA3_CHMAP_TRIGWORD_SHIFT	0x2

#define EDMA3_QEMCR			0x314
#define EDMA3_IPR			0x1068
#define EDMA3_IPRH			0x106c
#define EDMA3_ICR			0x1070
#define EDMA3_ICRH			0x1074
#define EDMA3_QEECR			0x1088
#define EDMA3_QEESR			0x108c
#define EDMA3_QSECR			0x1094

#define EDMA_FILL_BUFFER_SIZE		512

struct ti_edma3_priv {
	u32 base;
};

static u8 edma_fill_buffer[EDMA_FILL_BUFFER_SIZE] __aligned(ARCH_DMA_MINALIGN);

/**
 * qedma3_start - start QDMA on a channel
 * @base: base address of edma
 * @cfg: pointer to struct edma3_channel_config where you set the PaRAM
 * slot number to associate with, the chnum (your quick channel number,
 * 0-7), the transfer complete code, and the trigger slot word, which has
 * to correspond to the word number in struct edma3_slot_layout whose
 * write generates the event.
 */
void qedma3_start(u32 base, struct edma3_channel_config *cfg)
{
	u32 qchmap;

	/* Clear the pending interrupt bit */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Map parameter set and trigger word to quick channel */
	qchmap = ((EDMA3_CHMAP_PARSET_MASK & cfg->slot)
		  << EDMA3_CHMAP_PARSET_SHIFT) |
		 (cfg->trigger_slot_word << EDMA3_CHMAP_TRIGWORD_SHIFT);

	__raw_writel(qchmap, base + EDMA3_QCHMAP(cfg->chnum));

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Enable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEESR);
}
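
/*
 * Illustrative sketch (not part of the driver): starting a QDMA channel that
 * triggers when the destination address word of a PaRAM slot is written, as
 * __edma3_transfer() below does. EDMA3_BASE and the slot/channel numbers are
 * hypothetical placeholders.
 *
 *	struct edma3_channel_config cfg = {
 *		.slot = 32,
 *		.chnum = 0,
 *		.complete_code = 0,
 *		.trigger_slot_word = EDMA3_TWORD(dst),
 *	};
 *
 *	qedma3_start(EDMA3_BASE, &cfg);
 */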

/**
 * edma3_set_dest - set initial DMA destination address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @dst: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 * width to use when addressing the FIFO (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma3_set_dest_index().
 */
void edma3_set_dest(u32 base, int slot, u32 dst, enum edma3_address_mode mode,
		    enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		      (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
		       EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_dest_index - configure DMA destination address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between destination arrays in a frame
 * @cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_dest_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0x0000ffff) | (bidx << 16),
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0x0000ffff) | (cidx << 16),
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_dest_addr - set destination address for slot only
 */
void edma3_set_dest_addr(u32 base, int slot, u32 dst)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_src - set initial DMA source address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @src: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 * width to use when addressing the FIFO (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma3_set_src_index().
 */
void edma3_set_src(u32 base, int slot, u32 src, enum edma3_address_mode mode,
		   enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		      (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
		       EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_src_index - configure DMA source address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between source arrays in a frame
 * @cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_src_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0xffff0000) | bidx,
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0xffff0000) | cidx,
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_src_addr - set source address for slot only
 */
void edma3_set_src_addr(u32 base, int slot, u32 src)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_transfer_params - configure DMA transfer parameters
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 * the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots. If you are not doing it
 * all at once with edma3_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register. In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO. It will probably use
 * efficient burst modes to access memory.
 */
void edma3_set_transfer_params(u32 base, int slot, int acnt,
			       int bcnt, int ccnt, u16 bcnt_rld,
			       enum edma3_sync_dimension sync_mode)
{
	u32 opt;
	u32 link_bcntrld;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	link_bcntrld = __raw_readl(&rg->link_bcntrld);

	__raw_writel((bcnt_rld << 16) | (0x0000ffff & link_bcntrld),
		     &rg->link_bcntrld);

	opt = __raw_readl(&rg->opt);
	if (sync_mode == ASYNC)
		__raw_writel(opt & ~EDMA3_SLOPT_AB_SYNC, &rg->opt);
	else
		__raw_writel(opt | EDMA3_SLOPT_AB_SYNC, &rg->opt);

	/* Set the acount, bcount, ccount registers */
	__raw_writel((bcnt << 16) | (acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(0xffff & ccnt, &rg->ccnt);
}
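
/*
 * Illustrative sketch (not part of the driver): programming a PaRAM slot
 * piecewise, as the comment above describes, for an AB-synchronized copy of
 * 16 arrays of 512 bytes each. EDMA3_BASE, the slot number and the physical
 * addresses src_phys/dst_phys are hypothetical placeholders; the enum values
 * come from the parameter descriptions above.
 *
 *	edma3_set_transfer_params(EDMA3_BASE, 32, 512, 16, 1, 0, ABSYNC);
 *	edma3_set_src(EDMA3_BASE, 32, src_phys, INCR, W8BIT);
 *	edma3_set_src_index(EDMA3_BASE, 32, 512, 0);
 *	edma3_set_dest(EDMA3_BASE, 32, dst_phys, INCR, W8BIT);
 *	edma3_set_dest_index(EDMA3_BASE, 32, 512, 0);
 */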

/**
 * edma3_write_slot - write parameter RAM data for slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once. This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma3_write_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* Copy every 32-bit word of the slot layout */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		__raw_writel(*p++, addr++);
}

/**
 * edma3_read_slot - read parameter RAM data from slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma3_read_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* Copy every 32-bit word of the slot layout */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		*p++ = __raw_readl(addr++);
}
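
/*
 * Illustrative sketch (not part of the driver): using edma3_read_slot() and
 * edma3_write_slot() to clone a programmed slot as a template for another
 * transfer. EDMA3_BASE, the slot numbers and other_dst_phys are hypothetical
 * placeholders.
 *
 *	struct edma3_slot_layout param;
 *
 *	edma3_read_slot(EDMA3_BASE, 32, &param);
 *	edma3_write_slot(EDMA3_BASE, 33, &param);
 *	edma3_set_dest_addr(EDMA3_BASE, 33, other_dst_phys);
 */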

void edma3_slot_configure(u32 base, int slot, struct edma3_slot_config *cfg)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	__raw_writel(cfg->opt, &rg->opt);
	__raw_writel(cfg->src, &rg->src);
	__raw_writel((cfg->bcnt << 16) | (cfg->acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(cfg->dst, &rg->dst);
	__raw_writel((cfg->dst_bidx << 16) |
		     (cfg->src_bidx & 0xffff), &rg->src_dst_bidx);
	__raw_writel((cfg->bcntrld << 16) |
		     (cfg->link & 0xffff), &rg->link_bcntrld);
	__raw_writel((cfg->dst_cidx << 16) |
		     (cfg->src_cidx & 0xffff), &rg->src_dst_cidx);
	__raw_writel(0xffff & cfg->ccnt, &rg->ccnt);
}

/**
 * edma3_check_for_transfer - check if a transfer is complete by checking
 * the interrupt pending bit; clear the pending bit if it is complete
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when you started the qdma channel
 *
 * Return 0 if complete, 1 if not.
 */
int edma3_check_for_transfer(u32 base, struct edma3_channel_config *cfg)
{
	u32 inum;
	u32 ipr_base;
	u32 icr_base;

	if (cfg->complete_code < 32) {
		ipr_base = base + EDMA3_IPR;
		icr_base = base + EDMA3_ICR;
		inum = 1 << cfg->complete_code;
	} else {
		ipr_base = base + EDMA3_IPRH;
		icr_base = base + EDMA3_ICRH;
		inum = 1 << (cfg->complete_code - 32);
	}

	/* check complete interrupt */
	if (!(__raw_readl(ipr_base) & inum))
		return 1;

	/* clean up the pending int bit */
	__raw_writel(inum, icr_base);

	return 0;
}
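
/*
 * Illustrative sketch (not part of the driver): busy-waiting for a transfer
 * started with qedma3_start() to finish, exactly as __edma3_transfer() below
 * does. @cfg is the same edma3_channel_config passed to qedma3_start();
 * EDMA3_BASE is a hypothetical placeholder.
 *
 *	while (edma3_check_for_transfer(EDMA3_BASE, &cfg))
 *		;
 *	qedma3_stop(EDMA3_BASE, &cfg);
 */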

/**
 * qedma3_stop - stop qdma on the channel passed
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when you started the qdma channel
 */
void qedma3_stop(u32 base, struct edma3_channel_config *cfg)
{
	/* Disable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEECR);

	/* clean up the interrupt indication */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Clear the channel map */
	__raw_writel(0, base + EDMA3_QCHMAP(cfg->chnum));
}

/*
 * __edma3_transfer - copy @len bytes from @src to @dst through one QDMA
 * channel. @s_len is the size of the source buffer; when @len is bigger
 * than @s_len the source is re-read for every frame (this is how
 * __edma3_fill() reuses its small fill buffer).
 */
void __edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		      void *dst, void *src, size_t len, size_t s_len)
{
	struct edma3_slot_config slot;
	struct edma3_channel_config edma_channel;
	int b_cnt_value = 1;
	int rem_bytes = 0;
	int a_cnt_value = len;
	unsigned int addr = (unsigned int) (dst);
	unsigned int max_acnt = 0x7FFFU;

	if (len > s_len) {
		b_cnt_value = (len / s_len);
		rem_bytes = (len % s_len);
		a_cnt_value = s_len;
	} else if (len > max_acnt) {
		b_cnt_value = (len / max_acnt);
		rem_bytes = (len % max_acnt);
		a_cnt_value = max_acnt;
	}

	slot.opt = 0;
	slot.src = ((unsigned int) src);
	slot.acnt = a_cnt_value;
	slot.bcnt = b_cnt_value;
	slot.ccnt = 1;
	if (len == s_len)
		slot.src_bidx = a_cnt_value;
	else
		slot.src_bidx = 0;
	slot.dst_bidx = a_cnt_value;
	slot.src_cidx = 0;
	slot.dst_cidx = 0;
	slot.link = EDMA3_PARSET_NULL_LINK;
	slot.bcntrld = 0;
	slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
		   EDMA3_SLOPT_COMP_CODE(0) |
		   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;

	edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
	edma_channel.slot = edma_slot_num;
	edma_channel.chnum = 0;
	edma_channel.complete_code = 0;
	/* set event trigger to dst update */
	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

	qedma3_start(edma3_base_addr, &edma_channel);
	edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr);

	while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
		;
	qedma3_stop(edma3_base_addr, &edma_channel);

	if (rem_bytes != 0) {
		slot.opt = 0;
		if (len == s_len)
			slot.src =
				(b_cnt_value * max_acnt) + ((unsigned int) src);
		else
			slot.src = (unsigned int) src;
		slot.acnt = rem_bytes;
		slot.bcnt = 1;
		slot.ccnt = 1;
		slot.src_bidx = rem_bytes;
		slot.dst_bidx = rem_bytes;
		slot.src_cidx = 0;
		slot.dst_cidx = 0;
		slot.link = EDMA3_PARSET_NULL_LINK;
		slot.bcntrld = 0;
		slot.opt = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
			   EDMA3_SLOPT_COMP_CODE(0) |
			   EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
		edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
		edma_channel.slot = edma_slot_num;
		edma_channel.chnum = 0;
		edma_channel.complete_code = 0;
		/* set event trigger to dst update */
		edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

		qedma3_start(edma3_base_addr, &edma_channel);
		/* the remainder goes right after the bytes already copied */
		edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr +
				    (a_cnt_value * b_cnt_value));
		while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
			;
		qedma3_stop(edma3_base_addr, &edma_channel);
	}
}

void __edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		  void *dst, u8 val, size_t len)
{
	int xfer_len;
	int max_xfer = EDMA_FILL_BUFFER_SIZE * 65535;

	memset((void *)edma_fill_buffer, val, sizeof(edma_fill_buffer));

	while (len) {
		xfer_len = len;
		if (xfer_len > max_xfer)
			xfer_len = max_xfer;

		__edma3_transfer(edma3_base_addr, edma_slot_num, dst,
				 edma_fill_buffer, xfer_len,
				 EDMA_FILL_BUFFER_SIZE);
		len -= xfer_len;
		dst += xfer_len;
	}
}

#ifndef CONFIG_DMA

void edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		    void *dst, void *src, size_t len)
{
	__edma3_transfer(edma3_base_addr, edma_slot_num, dst, src, len, len);
}

void edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		void *dst, u8 val, size_t len)
{
	__edma3_fill(edma3_base_addr, edma_slot_num, dst, val, len);
}
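
/*
 * Illustrative sketch (not part of the driver): a board using this legacy
 * (non-DM) API to copy and fill memory through PaRAM slot 1. EDMA3_BASE,
 * dst_buf, src_buf and len are hypothetical placeholders; the base address
 * would normally come from the SoC headers.
 *
 *	edma3_transfer(EDMA3_BASE, 1, dst_buf, src_buf, len);
 *	edma3_fill(EDMA3_BASE, 1, dst_buf, 0x00, len);
 */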

#else

static int ti_edma3_transfer(struct udevice *dev, int direction, void *dst,
			     void *src, size_t len)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	/* enable edma3 clocks */
	enable_edma3_clocks();

	switch (direction) {
	case DMA_MEM_TO_MEM:
		__edma3_transfer(priv->base, 1, dst, src, len, len);
		break;
	default:
		pr_err("Transfer type not implemented in DMA driver\n");
		break;
	}

	/* disable edma3 clocks */
	disable_edma3_clocks();

	return 0;
}

static int ti_edma3_ofdata_to_platdata(struct udevice *dev)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	priv->base = devfdt_get_addr(dev);

	return 0;
}

static int ti_edma3_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM;

	return 0;
}

static const struct dma_ops ti_edma3_ops = {
	.transfer	= ti_edma3_transfer,
};

static const struct udevice_id ti_edma3_ids[] = {
	{ .compatible = "ti,edma3" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti_edma3",
	.id	= UCLASS_DMA,
	.of_match = ti_edma3_ids,
	.ops	= &ti_edma3_ops,
	.ofdata_to_platdata = ti_edma3_ofdata_to_platdata,
	.probe = ti_edma3_probe,
	.priv_auto_alloc_size = sizeof(struct ti_edma3_priv),
};
#endif /* CONFIG_DMA */
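
/*
 * Illustrative sketch (not part of the driver): with CONFIG_DMA enabled, a
 * caller does not use the functions above directly but goes through the DMA
 * uclass, e.g. via dma_memcpy(), which picks the first device advertising
 * DMA_SUPPORTS_MEM_TO_MEM and ends up in ti_edma3_transfer(). dst_buf,
 * src_buf and len are hypothetical placeholders.
 *
 *	ret = dma_memcpy(dst_buf, src_buf, len);
 *	if (ret)
 *		printf("DMA copy failed: %d\n", ret);
 */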