// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#define pr_fmt(fmt) "udma: " fmt

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <malloc.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <dm/of_access.h>
#include <dma.h>
#include <dma-uclass.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-navss-ringacc.h>
#include <linux/soc/ti/cppi5.h>
#include <linux/soc/ti/ti-udma.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "k3-udma-hwdef.h"
#include "k3-psil-priv.h"

#define K3_UDMA_MAX_RFLOWS 1024

struct udma_chan;

enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_BCHANRT,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_RCHAN,
	MMR_TCHAN,
	MMR_RFLOW,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
	[MMR_RCHAN] = "rchan",
	[MMR_TCHAN] = "tchan",
	[MMR_RFLOW] = "rflow",
};

struct udma_tchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
	struct k3_nav_ring *t_ring; /* Transmit ring */
	struct k3_nav_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */

};

#define udma_bchan udma_tchan

struct udma_rflow {
	void __iomem *reg_rflow;
	int id;
	struct k3_nav_ring *fd_ring; /* Free Descriptor ring */
	struct k3_nav_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_chan;
	void __iomem *reg_rt;

	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32	BIT(0)
#define UDMA_FLAG_PDMA_BURST	BIT(1)
#define UDMA_FLAG_TDTYPE	BIT(2)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	struct udma_oes_offsets oes;

	u8 tpl_levels;
	u32 level_start_idx[];
};

enum udma_rm_range {
	RM_RANGE_BCHAN = 0,
	RM_RANGE_TCHAN,
	RM_RANGE_RCHAN,
	RM_RANGE_RFLOW,
	RM_RANGE_TFLOW,
	RM_RANGE_LAST,
};

struct udma_tisci_rm {
	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_udmap_ops *tisci_udmap_ops;
	u32 tisci_dev_id;

	/* tisci information for PSI-L thread pairing/unpairing */
	const struct ti_sci_rm_psil_ops *tisci_psil_ops;
	u32 tisci_navss_dev_id;

	struct ti_sci_resource *rm_ranges[RM_RANGE_LAST];
};

struct udma_dev {
	struct udevice *dev;
	void __iomem *mmrs[MMR_LAST];

	struct udma_tisci_rm tisci_rm;
	struct k3_nav_ringacc *ringacc;

	u32 features;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_map;
	unsigned long *rflow_map_reserved;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_match_data *match_data;

	struct udma_chan *channels;
	u32 psil_base;

	u32 ch_count;
};

struct udma_chan_config {
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	int remote_thread_id;
	u32 atype;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_direction dir;

	unsigned int pkt_mode:1; /* TR or packet */
	unsigned int needs_epib:1; /* EPIB is needed for the communication or not */
	unsigned int enable_acc32:1;
	unsigned int enable_burst:1;
	unsigned int notdpkt:1; /* Suppress sending TDC packet */
};

struct udma_chan {
	struct udma_dev *ud;
	char name[20];

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	struct ti_udma_drv_chan_cfg_data cfg_data;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	struct udma_chan_config config;

	u32 id;

	struct cppi5_host_desc_t *desc_tx;
	bool in_use;
	void *desc_rx;
	u32 num_rx_bufs;
	u32 desc_rx_cur;

};

#define UDMA_CH_1000(ch)	(ch * 0x1000)
#define UDMA_CH_100(ch)		(ch * 0x100)
#define UDMA_CH_40(ch)		(ch * 0x40)

#ifdef PKTBUFSRX
#define UDMA_RX_DESC_NUM PKTBUFSRX
#else
#define UDMA_RX_DESC_NUM 4
#endif

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	u32 v;

	v = __raw_readl(base + reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, base + reg);
	return v;
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", val, base + reg);
	__raw_writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = udma_read(base, reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		udma_write(base, reg, tmp);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg)
{
	if (!tchan)
		return 0;
	return udma_read(tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_tchan *tchan,
				      int reg, u32 val)
{
	if (!tchan)
		return;
	udma_write(tchan->reg_rt, reg, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg)
{
	if (!rchan)
		return 0;
	return udma_read(rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_rchan *rchan,
				      int reg, u32 val)
{
	if (!rchan)
		return;
	udma_write(rchan->reg_rt, reg, val);
}

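/*
 * PSI-L thread pairing/unpairing is delegated to System Firmware via TI-SCI;
 * the destination thread number gets the destination-thread marker bit
 * (UDMA_PSIL_DST_THREAD_ID_OFFSET) set before the request is sent.
 */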
static inline int udma_navss_psil_pair(struct udma_dev *ud, u32 src_thread,
				       u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static inline int udma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
					 u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= UDMA_PSIL_DST_THREAD_ID_OFFSET;

	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

static inline char *udma_get_dir_text(enum dma_direction dir)
{
	switch (dir) {
	case DMA_DEV_TO_MEM:
		return "DEV_TO_MEM";
	case DMA_MEM_TO_DEV:
		return "MEM_TO_DEV";
	case DMA_MEM_TO_MEM:
		return "MEM_TO_MEM";
	case DMA_DEV_TO_DEV:
		return "DEV_TO_DEV";
	default:
		break;
	}

	return "invalid";
}

#include "k3-udma-u-boot.c"

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
}

static inline bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		pr_debug("%s: rrt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, rrt_ctl,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		pr_debug("%s: trt_ctl: 0x%08x (peer: 0x%08x)\n",
			 __func__, trt_ctl,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		break;
	default:
		break;
	}

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_nav_ring *ring = NULL;
	int ret = -ENOENT;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring && k3_nav_ringacc_ring_get_occ(ring))
		ret = k3_nav_ringacc_ring_pop(ring, addr);

	return ret;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_nav_ring *ring1 = NULL;
	struct k3_nav_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring1 = uc->rflow->fd_ring;
		ring2 = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	case DMA_MEM_TO_MEM:
		ring1 = uc->tchan->t_ring;
		ring2 = uc->tchan->tc_ring;
		break;
	default:
		break;
	}

	if (ring1)
		k3_nav_ringacc_ring_reset_dma(ring1, k3_nav_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_nav_ringacc_ring_reset(ring2);
}

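/*
 * Clear the real-time byte/packet counters by writing back the values just
 * read, so the channel statistics start from zero. The tchan peer counter is
 * only touched when the channel is not backed by a BCDMA bchan.
 */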
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

static inline int udma_stop_hard(struct udma_chan *uc)
{
	pr_debug("%s: ENTER (chan%d)\n", __func__, uc->id);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int udma_start(struct udma_chan *uc)
{
	/* Channel is already running, no need to proceed further */
	if (udma_is_chan_running(uc))
		goto out;

	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	/* Make sure that we clear the teardown bit, if it is set */
	udma_stop_hard(uc);

	/* Reset all counters */
	udma_reset_counters(uc);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		pr_debug("%s(rx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_CTL_REG),
			 udma_rchanrt_read(uc->rchan,
					   UDMA_RCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_DEV:
		/* Enable remote */
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		pr_debug("%s(tx): RT_CTL:0x%08x PEER RT_ENABLE:0x%08x\n",
			 __func__,
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_CTL_REG),
			 udma_tchanrt_read(uc->tchan,
					   UDMA_TCHAN_RT_PEER_RT_EN_REG));
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	pr_debug("%s: DONE chan:%d\n", __func__, uc->id);
out:
	return 0;
}

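/*
 * Request a channel teardown and, when @sync is set, poll the RT control
 * register (with a roughly 1 ms upper bound: 1000 iterations of udelay(1))
 * until the enable bit clears; warn if the peer is still enabled afterwards.
 */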
static inline void udma_stop_mem2dev(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG,
			   UDMA_CHAN_RT_CTL_EN |
			   UDMA_CHAN_RT_CTL_TDOWN);

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf(" %s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline void udma_stop_dev2mem(struct udma_chan *uc, bool sync)
{
	int i = 0;
	u32 val;

	udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
			   UDMA_PEER_RT_EN_ENABLE |
			   UDMA_PEER_RT_EN_TEARDOWN);

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG);
		udelay(1);
		if (i > 1000) {
			printf("%s TIMEOUT !\n", __func__);
			break;
		}
		i++;
	}

	val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG);
	if (val & UDMA_PEER_RT_EN_ENABLE)
		printf("%s: peer not stopped TIMEOUT !\n", __func__);
}

static inline int udma_stop(struct udma_chan *uc)
{
	pr_debug("%s: chan:%d dir:%s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	udma_reset_counters(uc);
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_stop_dev2mem(uc, true);
		break;
	case DMA_MEM_TO_DEV:
		udma_stop_mem2dev(uc, true);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void udma_poll_completion(struct udma_chan *uc, dma_addr_t *paddr)
{
	int i = 1;

	while (udma_pop_from_ring(uc, paddr)) {
		udelay(1);
		if (!(i % 1000000))
			printf(".");
		i++;
	}
}

static struct udma_rflow *__udma_reserve_rflow(struct udma_dev *ud, int id)
{
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	if (id >= 0) {
		if (test_bit(id, ud->rflow_map)) {
			dev_err(ud->dev, "rflow%d is in use\n", id);
			return ERR_PTR(-ENOENT);
		}
	} else {
		bitmap_or(tmp, ud->rflow_map, ud->rflow_map_reserved,
			  ud->rflow_cnt);

		id = find_next_zero_bit(tmp, ud->rflow_cnt, ud->rchan_cnt);
		if (id >= ud->rflow_cnt)
			return ERR_PTR(-ENOENT);
	}

	__set_bit(id, ud->rflow_map);
	return &ud->rflows[id];
}

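/*
 * UDMA_RESERVE_RESOURCE() generates __udma_reserve_tchan()/__udma_reserve_rchan():
 * reserve the requested channel when a valid id is passed, otherwise grab the
 * first free one from the corresponding bitmap.
 */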
Vignesh Rffcc66e2019-02-05 17:31:24 +0530676#define UDMA_RESERVE_RESOURCE(res) \
677static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
678 int id) \
679{ \
680 if (id >= 0) { \
681 if (test_bit(id, ud->res##_map)) { \
682 dev_err(ud->dev, "res##%d is in use\n", id); \
683 return ERR_PTR(-ENOENT); \
684 } \
685 } else { \
686 id = find_first_zero_bit(ud->res##_map, ud->res##_cnt); \
687 if (id == ud->res##_cnt) { \
688 return ERR_PTR(-ENOENT); \
689 } \
690 } \
691 \
692 __set_bit(id, ud->res##_map); \
693 return &ud->res##s[id]; \
694}

UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	uc->tchan = __udma_reserve_tchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan))
		return PTR_ERR(uc->tchan);

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA has support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			__clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		__set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	pr_debug("chan%d: got tchan%d\n", uc->id, uc->tchan->id);

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	uc->rchan = __udma_reserve_rchan(ud, uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan))
		return PTR_ERR(uc->rchan);

	pr_debug("chan%d: got rchan%d\n", uc->id, uc->rchan->id);

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	for (chan_id = 0; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	__set_bit(chan_id, ud->tchan_map);
	__set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	pr_debug("chan%d: got t/rchan%d pair\n", uc->id, chan_id);

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	if (!uc->rchan)
		dev_warn(ud->dev, "chan%d: does not have rchan??\n", uc->id);

	uc->rflow = __udma_reserve_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow))
		return PTR_ERR(uc->rflow);

	pr_debug("chan%d: got rflow%d\n", uc->id, uc->rflow->id);
	return 0;
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		__clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		__clear_bit(uc->tchan->id, ud->tchan_map);
		if (uc->tchan->tflow_id >= 0)
			__clear_bit(uc->tchan->tflow_id, ud->tflow_map);
		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__clear_bit(uc->rflow->id, ud->rflow_map);
		uc->rflow = NULL;
	}
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

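/*
 * Reserve a tchan and request/configure its transmit and transmit-completion
 * ring pair (16 elements, 8-byte ring elements, RING mode); roll back the
 * rings and the channel reservation on failure.
 */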
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
						&uc->tchan->t_ring,
						&uc->tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_tx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_nav_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_tx_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		k3_nav_ringacc_ring_free(uc->rflow->fd_ring);
		k3_nav_ringacc_ring_free(uc->rflow->r_ring);
		uc->rflow->fd_ring = NULL;
		uc->rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct k3_nav_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt) {
		fd_ring_id = ud->tflow_cnt + rflow->id;
	} else {
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;
	}

	ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
						&rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_rx_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = 16;
	ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;

	ret = k3_nav_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
	ret |= k3_nav_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_nav_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_nav_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_rx_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

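/*
 * Configure the tchan through TI-SCI (channel type, descriptor fetch size,
 * completion queue). When CONFIG_K3_DM_FW is enabled the cfg registers are
 * also programmed locally, as the firewall-only RM setup leaves that to us.
 */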
static int udma_alloc_tchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->tchan->id;
	req.tx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM)
		req.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	else
		req.tx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
	req.txcq_qnum = tc_ring;

	ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci tx alloc failed %d\n", ret);
		return ret;
	}

	/*
	 * Above TI SCI call handles firewall configuration, cfg
	 * register configuration still has to be done locally in
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_tchan_raw(uc);

	return 0;
}

static int udma_alloc_rchan_sci_req(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int fd_ring = k3_nav_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_nav_ringacc_get_ring_id(uc->rflow->r_ring);
	int tc_ring = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 mode;
	int ret;

	if (uc->config.pkt_mode)
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	else
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = uc->rchan->id;
	req.rx_chan_type = mode;
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		req.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
		req.rxcq_qnum = tc_ring;
	} else {
		req.rx_fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
							  uc->config.psd_size,
							  0) >> 2;
		req.rxcq_qnum = rx_ring;
	}
	if (ud->match_data->type == DMA_TYPE_UDMA &&
	    uc->rflow->id != uc->rchan->id &&
	    uc->config.dir != DMA_MEM_TO_MEM) {
		req.flowid_start = uc->rflow->id;
		req.flowid_cnt = 1;
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
	}

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u cfg failed %d\n",
			uc->rchan->id, ret);
		return ret;
	}
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return ret;

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;

	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;

	flow_req.rx_error_handling = 0;
	flow_req.rx_desc_type = 0;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = 2;
	flow_req.rx_src_tag_lo_sel = 4;
	flow_req.rx_dest_tag_hi_sel = 5;
	flow_req.rx_dest_tag_lo_sel = 4;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;
	flow_req.rx_ps_location = 0;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci,
						     &flow_req);
	if (ret) {
		dev_err(ud->dev, "tisci rx %u flow %u cfg failed %d\n",
			uc->rchan->id, uc->rflow->id, ret);
		return ret;
	}

	/*
	 * Above TI SCI call handles firewall configuration, cfg
	 * register configuration still has to be done locally in
	 * absence of RM services.
	 */
	if (IS_ENABLED(CONFIG_K3_DM_FW))
		udma_alloc_rchan_raw(uc);

	return 0;
}

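/*
 * Full channel setup for the requested transfer direction: reserve the
 * channel(s) and rings, derive the PSI-L source/destination thread IDs, run
 * the TI-SCI channel configuration and finally pair the PSI-L threads.
 * Everything is rolled back on error.
 */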
static int udma_alloc_chan_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	pr_debug("%s: chan:%d as %s\n",
		 __func__, uc->id, udma_get_dir_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		uc->config.pkt_mode = false;
		ret = udma_get_chan_pair(uc);
		if (ret)
			return ret;

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= 0x8000;

		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_free_res;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | 0x8000;

		break;
	default:
		/* Can not happen */
		pr_debug("%s: chan:%d invalid direction (%u)\n",
			 __func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* We have channel indexes and rings */
	if (uc->config.dir == DMA_MEM_TO_MEM) {
		ret = udma_alloc_tchan_sci_req(uc);
		if (ret)
			goto err_free_res;

		ret = udma_alloc_rchan_sci_req(uc);
		if (ret)
			goto err_free_res;
	} else {
		/* Slave transfer */
		if (uc->config.dir == DMA_MEM_TO_DEV) {
			ret = udma_alloc_tchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		} else {
			ret = udma_alloc_rchan_sci_req(uc);
			if (ret)
				goto err_free_res;
		}
	}

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_stop(uc);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			goto err_free_res;
		}
	}

	/* PSI-L pairing */
	ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "k3_nav_psil_request_link fail\n");
		goto err_free_res;
	}

	return 0;

err_free_res:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	uc->config.remote_thread_id = -1;
	return ret;
}

static void udma_free_chan_resources(struct udma_chan *uc)
{
	/* Hard reset UDMA channel */
	udma_stop_hard(uc);
	udma_reset_counters(uc);

	/* Release PSI-L pairing */
	udma_navss_psil_unpair(uc->ud, uc->config.src_thread, uc->config.dst_thread);

	/* Reset the rings for a new start */
	udma_reset_rings(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	uc->config.remote_thread_id = -1;
	uc->config.dir = DMA_MEM_TO_MEM;
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

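/*
 * Map the "gcfg" region first and read the CAP2/CAP3 (and CAP4 for PKTDMA)
 * capability registers to discover the channel/flow counts, then map only
 * those register regions that are actually present for this DMA type.
 */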
static int udma_get_mmrs(struct udevice *dev)
{
	struct udma_dev *ud = dev_get_priv(dev);
	u32 cap2, cap3, cap4;
	int i;

	ud->mmrs[MMR_GCFG] = (uint32_t *)devfdt_get_addr_name(dev, mmr_names[MMR_GCFG]);
	if (!ud->mmrs[MMR_GCFG])
		return -EINVAL;

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->echan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_BCDMA:
		ud->bchan_cnt = cap2 & 0x1ff;
		ud->tchan_cnt = (cap2 >> 9) & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
		ud->tchan_cnt = cap2 & 0x1ff;
		ud->rchan_cnt = (cap2 >> 18) & 0x1ff;
		ud->rflow_cnt = cap3 & 0x3fff;
		ud->tflow_cnt = cap4 & 0x3fff;
		break;
	default:
		return -EINVAL;
	}

	for (i = 1; i < MMR_LAST; i++) {
		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
			continue;
		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
			continue;
		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
			continue;

		ud->mmrs[i] = (uint32_t *)devfdt_get_addr_name(dev,
							       mmr_names[i]);
		if (!ud->mmrs[i])
			return -EINVAL;
	}

	return 0;
}

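/*
 * UDMA-type resource setup: allocate the channel/flow bitmaps, query the
 * "ti,sci-rm-range-*" ranges from TI-SCI and mark everything outside the
 * ranges granted to this host as unavailable. RX flows with the same ids as
 * RX channels stay reserved as default flows.
 */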
static int udma_setup_resources(struct udma_dev *ud)
{
	struct udevice *dev = ud->dev;
	int i;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rflow_map_reserved = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_map ||
	    !ud->rflow_map_reserved || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/*
	 * RX flows with the same Ids as RX channels are reserved to be used
	 * as default flows if remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by id.
	 */
	bitmap_set(ud->rflow_map_reserved, 0, ud->rchan_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		bitmap_zero(ud->rflow_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		bitmap_fill(ud->rflow_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		bitmap_clear(ud->rflow_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		bitmap_set(ud->rflow_map, ud->rchan_cnt,
			   ud->rflow_cnt - ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
		}
	}

	return 0;
}

static int bcdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource_desc *rm_desc;
	struct ti_sci_resource *rm_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* bchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->bchan_map, ud->bchan_cnt);
	} else {
		bitmap_fill(ud->bchan_map, ud->bchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->bchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: bchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

static int pktdma_setup_resources(struct udma_dev *ud)
{
	int i;
	struct udevice *dev = ud->dev;
	struct ti_sci_resource *rm_res;
	struct ti_sci_resource_desc *rm_desc;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_map = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
				     sizeof(unsigned long),
				     GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_map)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_map, ud->rflow_cnt);
	} else {
		bitmap_fill(ud->rflow_map, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tflow_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tflow: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
	}

	return 0;
}

1621static int setup_resources(struct udma_dev *ud)
1622{
1623 struct udevice *dev = ud->dev;
1624 int ch_count, ret;
1625
1626 switch (ud->match_data->type) {
1627 case DMA_TYPE_UDMA:
1628 ret = udma_setup_resources(ud);
1629 break;
1630 case DMA_TYPE_BCDMA:
1631 ret = bcdma_setup_resources(ud);
1632 break;
1633 case DMA_TYPE_PKTDMA:
1634 ret = pktdma_setup_resources(ud);
1635 break;
1636 default:
1637 return -EINVAL;
1638 }
1639
1640 if (ret)
1641 return ret;
1642
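	/*
	 * Bits still set in the maps belong to other hosts; count only the
	 * channels this host may actually use.
	 */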
1643 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
1644 if (ud->bchan_cnt)
1645 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301646 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
1647 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
1648 if (!ch_count)
1649 return -ENODEV;
1650
1651 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
1652 GFP_KERNEL);
1653 if (!ud->channels)
1654 return -ENOMEM;
1655
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301656 switch (ud->match_data->type) {
1657 case DMA_TYPE_UDMA:
1658 dev_dbg(dev,
1659 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
1660 ch_count,
1661 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1662 ud->tchan_cnt),
1663 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1664 ud->rchan_cnt),
1665 ud->rflow_cnt - bitmap_weight(ud->rflow_map,
1666 ud->rflow_cnt));
1667 break;
1668 case DMA_TYPE_BCDMA:
1669 dev_dbg(dev,
1670 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
1671 ch_count,
1672 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
1673 ud->bchan_cnt),
1674 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1675 ud->tchan_cnt),
1676 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1677 ud->rchan_cnt));
1678 break;
1679 case DMA_TYPE_PKTDMA:
1680 dev_dbg(dev,
1681 "Channels: %d (tchan: %u, rchan: %u)\n",
1682 ch_count,
1683 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
1684 ud->tchan_cnt),
1685 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
1686 ud->rchan_cnt));
1687 break;
1688 default:
1689 break;
1690 }
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301691
1692 return ch_count;
1693}
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301694
Vignesh Rffcc66e2019-02-05 17:31:24 +05301695static int udma_probe(struct udevice *dev)
1696{
1697 struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);
1698 struct udma_dev *ud = dev_get_priv(dev);
1699 int i, ret;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301700 struct udevice *tmp;
1701 struct udevice *tisci_dev = NULL;
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301702 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1703 ofnode navss_ofnode = ofnode_get_parent(dev_ofnode(dev));
1704
Vignesh Rffcc66e2019-02-05 17:31:24 +05301705
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301706 ud->match_data = (void *)dev_get_driver_data(dev);
Vignesh Rffcc66e2019-02-05 17:31:24 +05301707 ret = udma_get_mmrs(dev);
1708 if (ret)
1709 return ret;
1710
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05301711 ud->psil_base = ud->match_data->psil_base;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301712
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301713 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
1714 "ti,sci", &tisci_dev);
Vignesh Rffcc66e2019-02-05 17:31:24 +05301715 if (ret) {
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301716 debug("Failed to get TISCI phandle (%d)\n", ret);
1717 tisci_rm->tisci = NULL;
1718 return -EINVAL;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301719 }
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301720 tisci_rm->tisci = (struct ti_sci_handle *)
1721 (ti_sci_get_handle_from_sysfw(tisci_dev));
Vignesh Rffcc66e2019-02-05 17:31:24 +05301722
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301723 tisci_rm->tisci_dev_id = -1;
1724 ret = dev_read_u32(dev, "ti,sci-dev-id", &tisci_rm->tisci_dev_id);
1725 if (ret) {
1726 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1727 return ret;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301728 }
1729
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301730 tisci_rm->tisci_navss_dev_id = -1;
1731 ret = ofnode_read_u32(navss_ofnode, "ti,sci-dev-id",
1732 &tisci_rm->tisci_navss_dev_id);
1733 if (ret) {
1734 dev_err(dev, "navss sci-dev-id read failure %d\n", ret);
1735 return ret;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301736 }
1737
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301738 tisci_rm->tisci_udmap_ops = &tisci_rm->tisci->ops.rm_udmap_ops;
1739 tisci_rm->tisci_psil_ops = &tisci_rm->tisci->ops.rm_psil_ops;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301740
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301741 if (ud->match_data->type == DMA_TYPE_UDMA) {
1742 ret = uclass_get_device_by_phandle(UCLASS_MISC, dev,
1743 "ti,ringacc", &tmp);
1744 ud->ringacc = dev_get_priv(tmp);
1745 } else {
1746 struct k3_ringacc_init_data ring_init_data;
1747
1748 ring_init_data.tisci = ud->tisci_rm.tisci;
1749 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
1750 if (ud->match_data->type == DMA_TYPE_BCDMA) {
1751 ring_init_data.num_rings = ud->bchan_cnt +
1752 ud->tchan_cnt +
1753 ud->rchan_cnt;
1754 } else {
1755 ring_init_data.num_rings = ud->rflow_cnt +
1756 ud->tflow_cnt;
1757 }
1758
1759 ud->ringacc = k3_ringacc_dmarings_init(dev, &ring_init_data);
1760 }
1761 if (IS_ERR(ud->ringacc))
1762 return PTR_ERR(ud->ringacc);
1763
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301764 ud->dev = dev;
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301765 ud->ch_count = setup_resources(ud);
Vignesh Raghavendraa8837cf2019-12-09 10:25:32 +05301766 if (ud->ch_count <= 0)
1767 return ud->ch_count;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301768
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301769 for (i = 0; i < ud->bchan_cnt; i++) {
1770 struct udma_bchan *bchan = &ud->bchans[i];
1771
1772 bchan->id = i;
1773 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
1774 }
Vignesh Rffcc66e2019-02-05 17:31:24 +05301775
Vignesh Rffcc66e2019-02-05 17:31:24 +05301776 for (i = 0; i < ud->tchan_cnt; i++) {
1777 struct udma_tchan *tchan = &ud->tchans[i];
1778
1779 tchan->id = i;
Vignesh Raghavendra5abb6942021-06-07 19:47:53 +05301780 tchan->reg_chan = ud->mmrs[MMR_TCHAN] + UDMA_CH_100(i);
Vignesh Rffcc66e2019-02-05 17:31:24 +05301781 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + UDMA_CH_1000(i);
1782 }
1783
1784 for (i = 0; i < ud->rchan_cnt; i++) {
1785 struct udma_rchan *rchan = &ud->rchans[i];
1786
1787 rchan->id = i;
Vignesh Raghavendra5abb6942021-06-07 19:47:53 +05301788 rchan->reg_chan = ud->mmrs[MMR_RCHAN] + UDMA_CH_100(i);
Vignesh Rffcc66e2019-02-05 17:31:24 +05301789 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + UDMA_CH_1000(i);
1790 }
1791
1792 for (i = 0; i < ud->rflow_cnt; i++) {
1793 struct udma_rflow *rflow = &ud->rflows[i];
1794
1795 rflow->id = i;
Vignesh Raghavendra5abb6942021-06-07 19:47:53 +05301796 rflow->reg_rflow = ud->mmrs[MMR_RFLOW] + UDMA_CH_40(i);
Vignesh Rffcc66e2019-02-05 17:31:24 +05301797 }
1798
1799 for (i = 0; i < ud->ch_count; i++) {
1800 struct udma_chan *uc = &ud->channels[i];
1801
1802 uc->ud = ud;
1803 uc->id = i;
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05301804 uc->config.remote_thread_id = -1;
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301805 uc->bchan = NULL;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301806 uc->tchan = NULL;
1807 uc->rchan = NULL;
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301808 uc->config.mapped_channel_id = -1;
1809 uc->config.default_flow_id = -1;
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05301810 uc->config.dir = DMA_MEM_TO_MEM;
Vignesh Rffcc66e2019-02-05 17:31:24 +05301811 sprintf(uc->name, "UDMA chan%d", i);
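		/* channel 0 is reserved for MEM_TO_MEM (memcpy) transfers */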
1812 if (!i)
1813 uc->in_use = true;
1814 }
1815
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301816 pr_debug("%s(rev: 0x%08x) CAP0-3: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1817 dev->name,
Vignesh Rffcc66e2019-02-05 17:31:24 +05301818 udma_read(ud->mmrs[MMR_GCFG], 0),
1819 udma_read(ud->mmrs[MMR_GCFG], 0x20),
1820 udma_read(ud->mmrs[MMR_GCFG], 0x24),
1821 udma_read(ud->mmrs[MMR_GCFG], 0x28),
1822 udma_read(ud->mmrs[MMR_GCFG], 0x2c));
1823
1824 uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM | DMA_SUPPORTS_MEM_TO_DEV;
1825
1826 return ret;
1827}
1828
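/*
 * Hand a descriptor to the ring accelerator: the descriptor pointer is
 * copied into a 64-bit ring element and pushed onto the given ring.
 */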
Vignesh Raghavendrab0ab0082019-12-09 10:25:38 +05301829static int udma_push_to_ring(struct k3_nav_ring *ring, void *elem)
1830{
1831 u64 addr = 0;
1832
1833 memcpy(&addr, &elem, sizeof(elem));
1834 return k3_nav_ringacc_ring_push(ring, &addr);
1835}
1836
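/*
 * Build a TR-mode descriptor for a memcpy. Transfers below 64 KiB use a
 * single type-15 TR; larger ones are split into a block TR moving
 * (SZ_64K - alignment)-byte chunks plus a second TR for the remainder.
 * Illustrative numbers: len = 200000 with buffers aligned to 8 bytes or
 * more gives tr0_cnt0 = 65528, tr0_cnt1 = 3 and tr1_cnt0 = 3416.
 */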
Vignesh Rffcc66e2019-02-05 17:31:24 +05301837static int *udma_prep_dma_memcpy(struct udma_chan *uc, dma_addr_t dest,
1838 dma_addr_t src, size_t len)
1839{
1840 u32 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
1841 struct cppi5_tr_type15_t *tr_req;
1842 int num_tr;
1843 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
1844 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
1845 unsigned long dummy;
1846 void *tr_desc;
1847 size_t desc_size;
1848
1849 if (len < SZ_64K) {
1850 num_tr = 1;
1851 tr0_cnt0 = len;
1852 tr0_cnt1 = 1;
1853 } else {
1854 unsigned long align_to = __ffs(src | dest);
1855
1856 if (align_to > 3)
1857 align_to = 3;
1858 /*
1859 * Keep it simple: tr0 moves (SZ_64K - alignment)-sized blocks,
1860 * tr1 moves the remainder
1861 */
1862 num_tr = 2;
1863 tr0_cnt0 = (SZ_64K - BIT(align_to));
1864 if (len / tr0_cnt0 >= SZ_64K) {
1865 dev_err(uc->ud->dev, "size %zu is not supported\n",
1866 len);
1867 return NULL;
1868 }
1869
1870 tr0_cnt1 = len / tr0_cnt0;
1871 tr1_cnt0 = len % tr0_cnt0;
1872 }
1873
1874 desc_size = cppi5_trdesc_calc_size(num_tr, tr_size);
1875 tr_desc = dma_alloc_coherent(desc_size, &dummy);
1876 if (!tr_desc)
1877 return NULL;
1878 memset(tr_desc, 0, desc_size);
1879
1880 cppi5_trdesc_init(tr_desc, num_tr, tr_size, 0, 0);
1881 cppi5_desc_set_pktids(tr_desc, uc->id, 0x3fff);
1882 cppi5_desc_set_retpolicy(tr_desc, 0, tc_ring_id);
1883
1884 tr_req = tr_desc + tr_size;
1885
1886 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
1887 CPPI5_TR_EVENT_SIZE_COMPLETION, 1);
1888 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
1889
1890 tr_req[0].addr = src;
1891 tr_req[0].icnt0 = tr0_cnt0;
1892 tr_req[0].icnt1 = tr0_cnt1;
1893 tr_req[0].icnt2 = 1;
1894 tr_req[0].icnt3 = 1;
1895 tr_req[0].dim1 = tr0_cnt0;
1896
1897 tr_req[0].daddr = dest;
1898 tr_req[0].dicnt0 = tr0_cnt0;
1899 tr_req[0].dicnt1 = tr0_cnt1;
1900 tr_req[0].dicnt2 = 1;
1901 tr_req[0].dicnt3 = 1;
1902 tr_req[0].ddim1 = tr0_cnt0;
1903
1904 if (num_tr == 2) {
1905 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
1906 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
1907 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
1908
1909 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
1910 tr_req[1].icnt0 = tr1_cnt0;
1911 tr_req[1].icnt1 = 1;
1912 tr_req[1].icnt2 = 1;
1913 tr_req[1].icnt3 = 1;
1914
1915 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
1916 tr_req[1].dicnt0 = tr1_cnt0;
1917 tr_req[1].dicnt1 = 1;
1918 tr_req[1].dicnt2 = 1;
1919 tr_req[1].dicnt3 = 1;
1920 }
1921
1922 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
1923
Vignesh Raghavendraf03cb5c2019-12-09 10:25:39 +05301924 flush_dcache_range((unsigned long)tr_desc,
1925 ALIGN((unsigned long)tr_desc + desc_size,
Vignesh Raghavendrac0b94902019-12-09 10:25:35 +05301926 ARCH_DMA_MINALIGN));
Vignesh Rffcc66e2019-02-05 17:31:24 +05301927
Vignesh Raghavendrab0ab0082019-12-09 10:25:38 +05301928 udma_push_to_ring(uc->tchan->t_ring, tr_desc);
Vignesh Rffcc66e2019-02-05 17:31:24 +05301929
1930 return 0;
1931}
1932
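/*
 * The VALID_PARAMS masks below tell TISCI which fields of a channel
 * configuration request carry meaningful values; unflagged fields are
 * left at firmware defaults.
 */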
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05301933#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1934 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1935 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1936
1937#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1938 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1939 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1940
1941#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1942 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1943
1944#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1945 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1946 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1947 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1948 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1949 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1950 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1951 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1952 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1953
1954#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1955 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1956 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1957 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1958 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1959 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1960 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1961 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1962 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1963 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1964
1965static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1966{
1967 struct udma_dev *ud = uc->ud;
1968 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1969 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1970 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1971 struct udma_bchan *bchan = uc->bchan;
1972 int ret = 0;
1973
1974 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1975 req_tx.nav_id = tisci_rm->tisci_dev_id;
1976 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1977 req_tx.index = bchan->id;
1978
1979 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1980 if (ret)
1981 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1982
1983 return ret;
1984}
1985
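/*
 * Reserve a block-copy channel: a negative id means "pick the first free
 * bchan", otherwise the requested bchan must still be unused.
 */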
1986static struct udma_bchan *__bcdma_reserve_bchan(struct udma_dev *ud, int id)
1987{
1988 if (id >= 0) {
1989 if (test_bit(id, ud->bchan_map)) {
1990 dev_err(ud->dev, "bchan%d is in use\n", id);
1991 return ERR_PTR(-ENOENT);
1992 }
1993 } else {
1994 id = find_next_zero_bit(ud->bchan_map, ud->bchan_cnt, 0);
1995 if (id == ud->bchan_cnt)
1996 return ERR_PTR(-ENOENT);
1997 }
1998 __set_bit(id, ud->bchan_map);
1999 return &ud->bchans[id];
2000}
2001
2002static int bcdma_get_bchan(struct udma_chan *uc)
2003{
2004 struct udma_dev *ud = uc->ud;
2005
2006 if (uc->bchan) {
2007 dev_err(ud->dev, "chan%d: already have bchan%d allocated\n",
2008 uc->id, uc->bchan->id);
2009 return 0;
2010 }
2011
2012 uc->bchan = __bcdma_reserve_bchan(ud, -1);
2013 if (IS_ERR(uc->bchan))
2014 return PTR_ERR(uc->bchan);
2015
2016 uc->tchan = uc->bchan;
2017
2018 return 0;
2019}
2020
2021static void bcdma_put_bchan(struct udma_chan *uc)
2022{
2023 struct udma_dev *ud = uc->ud;
2024
2025 if (uc->bchan) {
2026 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
2027 uc->bchan->id);
2028 __clear_bit(uc->bchan->id, ud->bchan_map);
2029 uc->bchan = NULL;
2030 uc->tchan = NULL;
2031 }
2032}
2033
2034static void bcdma_free_bchan_resources(struct udma_chan *uc)
2035{
2036 if (!uc->bchan)
2037 return;
2038
2039 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2040 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2041 uc->bchan->tc_ring = NULL;
2042 uc->bchan->t_ring = NULL;
2043
2044 bcdma_put_bchan(uc);
2045}
2046
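/*
 * A bchan uses a ring pair (forward + completion) from the DMA ring
 * accelerator; only the forward ring needs explicit configuration here
 * (16 elements of 8 bytes, ring mode).
 */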
2047static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
2048{
2049 struct k3_nav_ring_cfg ring_cfg;
2050 struct udma_dev *ud = uc->ud;
2051 int ret;
2052
2053 ret = bcdma_get_bchan(uc);
2054 if (ret)
2055 return ret;
2056
2057 ret = k3_nav_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
2058 &uc->bchan->t_ring,
2059 &uc->bchan->tc_ring);
2060 if (ret) {
2061 ret = -EBUSY;
2062 goto err_ring;
2063 }
2064
2065 memset(&ring_cfg, 0, sizeof(ring_cfg));
2066 ring_cfg.size = 16;
2067 ring_cfg.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8;
2068 ring_cfg.mode = K3_NAV_RINGACC_RING_MODE_RING;
2069
2070 ret = k3_nav_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
2071 if (ret)
2072 goto err_ringcfg;
2073
2074 return 0;
2075
2076err_ringcfg:
2077 k3_nav_ringacc_ring_free(uc->bchan->tc_ring);
2078 uc->bchan->tc_ring = NULL;
2079 k3_nav_ringacc_ring_free(uc->bchan->t_ring);
2080 uc->bchan->t_ring = NULL;
2081err_ring:
2082 bcdma_put_bchan(uc);
2083
2084 return ret;
2085}
2086
2087static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
2088{
2089 struct udma_dev *ud = uc->ud;
2090 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2091 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2092 struct udma_tchan *tchan = uc->tchan;
2093 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2094 int ret = 0;
2095
2096 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2097 req_tx.nav_id = tisci_rm->tisci_dev_id;
2098 req_tx.index = tchan->id;
2099 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2100 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
2101 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2102 /* wait for peer to complete the teardown for PDMAs */
2103 req_tx.valid_params |=
2104 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2105 req_tx.tx_tdtype = 1;
2106 }
2107
2108 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2109 if (ret)
2110 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2111
2112 return ret;
2113}
2114
2115#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2116
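/*
 * PKTDMA RX setup takes two TISCI calls: configure the rchan itself, then
 * its default flow (EPIB/psdata presence follows the PSI-L endpoint
 * configuration, RX error handling is enabled).
 */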
2117static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2118{
2119 struct udma_dev *ud = uc->ud;
2120 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2121 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2122 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2123 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2124 int ret = 0;
2125
2126 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2127 req_rx.nav_id = tisci_rm->tisci_dev_id;
2128 req_rx.index = uc->rchan->id;
2129
2130 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2131 if (ret) {
2132 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2133 return ret;
2134 }
2135
2136 flow_req.valid_params =
2137 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2138 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2139 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2140
2141 flow_req.nav_id = tisci_rm->tisci_dev_id;
2142 flow_req.flow_index = uc->rflow->id;
2143
2144 if (uc->config.needs_epib)
2145 flow_req.rx_einfo_present = 1;
2146 else
2147 flow_req.rx_einfo_present = 0;
2148 if (uc->config.psd_size)
2149 flow_req.rx_psinfo_present = 1;
2150 else
2151 flow_req.rx_psinfo_present = 0;
2152 flow_req.rx_error_handling = 1;
2153
2154 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2155
2156 if (ret)
2157 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2158 ret);
2159
2160 return ret;
2161}
2162
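/*
 * Only MEM_TO_MEM (block-copy) transfers are set up here; any other
 * direction is rejected below.
 */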
2163static int bcdma_alloc_chan_resources(struct udma_chan *uc)
2164{
2165 int ret;
2166
2167 uc->config.pkt_mode = false;
2168
2169 switch (uc->config.dir) {
2170 case DMA_MEM_TO_MEM:
2171 /* Non synchronized - mem to mem type of transfer */
2172 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2173 uc->id);
2174
2175 ret = bcdma_alloc_bchan_resources(uc);
2176 if (ret)
2177 return ret;
2178
2179 ret = bcdma_tisci_m2m_channel_config(uc);
2180 break;
2181 default:
2182 /* Cannot happen */
2183 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2184 __func__, uc->id, uc->config.dir);
2185 return -EINVAL;
2186 }
2187
2188 /* check if the channel configuration was successful */
2189 if (ret)
2190 goto err_res_free;
2191
2192 if (udma_is_chan_running(uc)) {
2193 dev_warn(uc->ud->dev, "chan%d: is running!\n", uc->id);
2194 udma_stop(uc);
2195 if (udma_is_chan_running(uc)) {
2196 dev_err(uc->ud->dev, "chan%d: won't stop!\n", uc->id);
2197 goto err_res_free;
2198 }
2199 }
2200
2201 udma_reset_rings(uc);
2202
2203 return 0;
2204
2205err_res_free:
2206 bcdma_free_bchan_resources(uc);
2207 udma_free_tx_resources(uc);
2208 udma_free_rx_resources(uc);
2209
2210 udma_reset_uchan(uc);
2211
2212 return ret;
2213}
2214
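/*
 * PKTDMA channels are slave (dev <-> mem) packet-mode channels: the host
 * side PSI-L thread is psil_base + channel id, the other side is the
 * endpoint's remote thread id, and K3_PSIL_DST_THREAD_ID_OFFSET marks the
 * destination end. The two threads are then linked via TISCI PSI-L pairing.
 */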
2215static int pktdma_alloc_chan_resources(struct udma_chan *uc)
2216{
2217 struct udma_dev *ud = uc->ud;
2218 int ret;
2219
2220 switch (uc->config.dir) {
2221 case DMA_MEM_TO_DEV:
2222 /* Slave transfer synchronized - mem to dev (TX) transfer */
2223 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2224 uc->id);
2225
2226 ret = udma_alloc_tx_resources(uc);
2227 if (ret) {
2228 uc->config.remote_thread_id = -1;
2229 return ret;
2230 }
2231
2232 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2233 uc->config.dst_thread = uc->config.remote_thread_id;
2234 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2235
2236 ret = pktdma_tisci_tx_channel_config(uc);
2237 break;
2238 case DMA_DEV_TO_MEM:
2239 /* Slave transfer synchronized - dev to mem (RX) transfer */
2240 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2241 uc->id);
2242
2243 ret = udma_alloc_rx_resources(uc);
2244 if (ret) {
2245 uc->config.remote_thread_id = -1;
2246 return ret;
2247 }
2248
2249 uc->config.src_thread = uc->config.remote_thread_id;
2250 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2251 K3_PSIL_DST_THREAD_ID_OFFSET;
2252
2253 ret = pktdma_tisci_rx_channel_config(uc);
2254 break;
2255 default:
2256 /* Cannot happen */
2257 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2258 __func__, uc->id, uc->config.dir);
2259 return -EINVAL;
2260 }
2261
2262 /* check if the channel configuration was successful */
2263 if (ret)
2264 goto err_res_free;
2265
2266 /* PSI-L pairing */
2267 ret = udma_navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2268 if (ret) {
2269 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2270 uc->config.src_thread, uc->config.dst_thread);
2271 goto err_res_free;
2272 }
2273
2274 if (udma_is_chan_running(uc)) {
2275 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2276 udma_stop(uc);
2277 if (udma_is_chan_running(uc)) {
2278 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2279 goto err_res_free;
2280 }
2281 }
2282
2283 udma_reset_rings(uc);
2284
2285 if (uc->tchan)
2286 dev_dbg(ud->dev,
2287 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2288 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2289 uc->config.remote_thread_id);
2290 else if (uc->rchan)
2291 dev_dbg(ud->dev,
2292 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2293 uc->id, uc->rchan->id, uc->rflow->id,
2294 uc->config.remote_thread_id);
2295 return 0;
2296
2297err_res_free:
2298 udma_free_tx_resources(uc);
2299 udma_free_rx_resources(uc);
2300
2301 udma_reset_uchan(uc);
2302
2303 return ret;
2304}
2305
Vignesh Rffcc66e2019-02-05 17:31:24 +05302306static int udma_transfer(struct udevice *dev, int direction,
2307 void *dst, void *src, size_t len)
2308{
2309 struct udma_dev *ud = dev_get_priv(dev);
2310 /* Channel0 is reserved for memcpy */
2311 struct udma_chan *uc = &ud->channels[0];
2312 dma_addr_t paddr = 0;
2313 int ret;
2314
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302315 switch (ud->match_data->type) {
2316 case DMA_TYPE_UDMA:
2317 ret = udma_alloc_chan_resources(uc);
2318 break;
2319 case DMA_TYPE_BCDMA:
2320 ret = bcdma_alloc_chan_resources(uc);
2321 break;
2322 default:
2323 return -EINVAL;
2324 }
Vignesh Rffcc66e2019-02-05 17:31:24 +05302325 if (ret)
2326 return ret;
2327
2328 udma_prep_dma_memcpy(uc, (dma_addr_t)dst, (dma_addr_t)src, len);
2329 udma_start(uc);
2330 udma_poll_completion(uc, &paddr);
2331 udma_stop(uc);
2332
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302333 switch (ud->match_data->type) {
2334 case DMA_TYPE_UDMA:
2335 udma_free_chan_resources(uc);
2336 break;
2337 case DMA_TYPE_BCDMA:
2338 bcdma_free_bchan_resources(uc);
2339 break;
2340 default:
2341 return -EINVAL;
2342 }
2343
Vignesh Rffcc66e2019-02-05 17:31:24 +05302344 return 0;
2345}
2346
2347static int udma_request(struct dma *dma)
2348{
2349 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302350 struct udma_chan_config *ucc;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302351 struct udma_chan *uc;
2352 unsigned long dummy;
2353 int ret;
2354
2355 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2356 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2357 return -EINVAL;
2358 }
2359
2360 uc = &ud->channels[dma->id];
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302361 ucc = &uc->config;
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302362 switch (ud->match_data->type) {
2363 case DMA_TYPE_UDMA:
2364 ret = udma_alloc_chan_resources(uc);
2365 break;
2366 case DMA_TYPE_BCDMA:
2367 ret = bcdma_alloc_chan_resources(uc);
2368 break;
2369 case DMA_TYPE_PKTDMA:
2370 ret = pktdma_alloc_chan_resources(uc);
2371 break;
2372 default:
2373 return -EINVAL;
2374 }
Vignesh Rffcc66e2019-02-05 17:31:24 +05302375 if (ret) {
2376 dev_err(dma->dev, "alloc dma res failed %d\n", ret);
2377 return -EINVAL;
2378 }
2379
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302380 if (uc->config.dir == DMA_MEM_TO_DEV) {
2381 uc->desc_tx = dma_alloc_coherent(ucc->hdesc_size, &dummy);
2382 memset(uc->desc_tx, 0, ucc->hdesc_size);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302383 } else {
2384 uc->desc_rx = dma_alloc_coherent(
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302385 ucc->hdesc_size * UDMA_RX_DESC_NUM, &dummy);
2386 memset(uc->desc_rx, 0, ucc->hdesc_size * UDMA_RX_DESC_NUM);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302387 }
2388
2389 uc->in_use = true;
2390 uc->desc_rx_cur = 0;
2391 uc->num_rx_bufs = 0;
2392
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302393 if (uc->config.dir == DMA_DEV_TO_MEM) {
Vignesh Raghavendra5e6d9cc2019-12-04 22:17:21 +05302394 uc->cfg_data.flow_id_base = uc->rflow->id;
2395 uc->cfg_data.flow_id_cnt = 1;
2396 }
2397
Vignesh Rffcc66e2019-02-05 17:31:24 +05302398 return 0;
2399}
2400
Simon Glassaae95882020-02-03 07:35:55 -07002401static int udma_rfree(struct dma *dma)
Vignesh Rffcc66e2019-02-05 17:31:24 +05302402{
2403 struct udma_dev *ud = dev_get_priv(dma->dev);
2404 struct udma_chan *uc;
2405
2406 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2407 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2408 return -EINVAL;
2409 }
2410 uc = &ud->channels[dma->id];
2411
2412 if (udma_is_chan_running(uc))
2413 udma_stop(uc);
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302414
2415 udma_navss_psil_unpair(ud, uc->config.src_thread,
2416 uc->config.dst_thread);
2417
2418 bcdma_free_bchan_resources(uc);
2419 udma_free_tx_resources(uc);
2420 udma_free_rx_resources(uc);
2421 udma_reset_uchan(uc);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302422
2423 uc->in_use = false;
2424
2425 return 0;
2426}
2427
2428static int udma_enable(struct dma *dma)
2429{
2430 struct udma_dev *ud = dev_get_priv(dma->dev);
2431 struct udma_chan *uc;
2432 int ret;
2433
2434 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2435 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2436 return -EINVAL;
2437 }
2438 uc = &ud->channels[dma->id];
2439
2440 ret = udma_start(uc);
2441
2442 return ret;
2443}
2444
2445static int udma_disable(struct dma *dma)
2446{
2447 struct udma_dev *ud = dev_get_priv(dma->dev);
2448 struct udma_chan *uc;
2449 int ret = 0;
2450
2451 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2452 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2453 return -EINVAL;
2454 }
2455 uc = &ud->channels[dma->id];
2456
2457 if (udma_is_chan_running(uc))
2458 ret = udma_stop(uc);
2459 else
2460 dev_err(dma->dev, "%s not running\n", __func__);
2461
2462 return ret;
2463}
2464
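/*
 * Synchronous TX: fill the (single, reused) host descriptor, flush payload
 * and descriptor from the cache, push the descriptor to the TX ring and
 * busy-wait on the completion ring.
 */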
2465static int udma_send(struct dma *dma, void *src, size_t len, void *metadata)
2466{
2467 struct udma_dev *ud = dev_get_priv(dma->dev);
2468 struct cppi5_host_desc_t *desc_tx;
2469 dma_addr_t dma_src = (dma_addr_t)src;
2470 struct ti_udma_drv_packet_data packet_data = { 0 };
2471 dma_addr_t paddr;
2472 struct udma_chan *uc;
2473 u32 tc_ring_id;
2474 int ret;
2475
Keerthya3f25b92019-04-24 16:33:54 +05302476 if (metadata)
Vignesh Rffcc66e2019-02-05 17:31:24 +05302477 packet_data = *((struct ti_udma_drv_packet_data *)metadata);
2478
2479 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2480 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2481 return -EINVAL;
2482 }
2483 uc = &ud->channels[dma->id];
2484
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302485 if (uc->config.dir != DMA_MEM_TO_DEV)
Vignesh Rffcc66e2019-02-05 17:31:24 +05302486 return -EINVAL;
2487
2488 tc_ring_id = k3_nav_ringacc_get_ring_id(uc->tchan->tc_ring);
2489
2490 desc_tx = uc->desc_tx;
2491
2492 cppi5_hdesc_reset_hbdesc(desc_tx);
2493
2494 cppi5_hdesc_init(desc_tx,
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302495 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2496 uc->config.psd_size);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302497 cppi5_hdesc_set_pktlen(desc_tx, len);
2498 cppi5_hdesc_attach_buf(desc_tx, dma_src, len, dma_src, len);
2499 cppi5_desc_set_pktids(&desc_tx->hdr, uc->id, 0x3fff);
2500 cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, tc_ring_id);
2501 /* pass below information from caller */
2502 cppi5_hdesc_set_pkttype(desc_tx, packet_data.pkt_type);
2503 cppi5_desc_set_tags_ids(&desc_tx->hdr, 0, packet_data.dest_tag);
2504
Vignesh Raghavendraf03cb5c2019-12-09 10:25:39 +05302505 flush_dcache_range((unsigned long)dma_src,
2506 ALIGN((unsigned long)dma_src + len,
Vignesh Raghavendrac0b94902019-12-09 10:25:35 +05302507 ARCH_DMA_MINALIGN));
Vignesh Raghavendraf03cb5c2019-12-09 10:25:39 +05302508 flush_dcache_range((unsigned long)desc_tx,
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302509 ALIGN((unsigned long)desc_tx + uc->config.hdesc_size,
Vignesh Raghavendrac0b94902019-12-09 10:25:35 +05302510 ARCH_DMA_MINALIGN));
Vignesh Rffcc66e2019-02-05 17:31:24 +05302511
Vignesh Raghavendrab0ab0082019-12-09 10:25:38 +05302512 ret = udma_push_to_ring(uc->tchan->t_ring, uc->desc_tx);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302513 if (ret) {
2514 dev_err(dma->dev, "TX dma push fail ch_id %lu %d\n",
2515 dma->id, ret);
2516 return ret;
2517 }
2518
2519 udma_poll_completion(uc, &paddr);
2520
2521 return 0;
2522}
2523
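/*
 * Poll the RX flow's receive ring for one completed descriptor, invalidate
 * the descriptor and payload cache lines and hand the buffer back to the
 * caller. Returns the packet length, or 0 when nothing is pending.
 */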
2524static int udma_receive(struct dma *dma, void **dst, void *metadata)
2525{
2526 struct udma_dev *ud = dev_get_priv(dma->dev);
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302527 struct udma_chan_config *ucc;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302528 struct cppi5_host_desc_t *desc_rx;
2529 dma_addr_t buf_dma;
2530 struct udma_chan *uc;
2531 u32 buf_dma_len, pkt_len;
2532 u32 port_id = 0;
2533 int ret;
2534
2535 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2536 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2537 return -EINVAL;
2538 }
2539 uc = &ud->channels[dma->id];
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302540 ucc = &uc->config;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302541
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302542 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh Rffcc66e2019-02-05 17:31:24 +05302543 return -EINVAL;
2544 if (!uc->num_rx_bufs)
2545 return -EINVAL;
2546
Vignesh Raghavendra7be51212020-07-06 13:26:26 +05302547 ret = k3_nav_ringacc_ring_pop(uc->rflow->r_ring, &desc_rx);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302548 if (ret && ret != -ENODATA) {
2549 dev_err(dma->dev, "rx dma fail ch_id:%lu %d\n", dma->id, ret);
2550 return ret;
2551 } else if (ret == -ENODATA) {
2552 return 0;
2553 }
2554
2555 /* invalidate cache data */
Vignesh Raghavendrac0b94902019-12-09 10:25:35 +05302556 invalidate_dcache_range((ulong)desc_rx,
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302557 (ulong)(desc_rx + ucc->hdesc_size));
Vignesh Rffcc66e2019-02-05 17:31:24 +05302558
2559 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
2560 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
2561
2562 /* invalidate cache data */
Vignesh Raghavendrac0b94902019-12-09 10:25:35 +05302563 invalidate_dcache_range((ulong)buf_dma,
2564 (ulong)(buf_dma + buf_dma_len));
Vignesh Rffcc66e2019-02-05 17:31:24 +05302565
2566 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
2567
2568 *dst = (void *)buf_dma;
2569 uc->num_rx_bufs--;
2570
2571 return pkt_len;
2572}
2573
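/*
 * Translate a DT dma-cells specifier: args[0] is the PSI-L remote thread
 * id. Threads with K3_PSIL_DST_THREAD_ID_OFFSET set are destinations
 * (MEM_TO_DEV), the rest are sources (DEV_TO_MEM); the matching PSI-L
 * endpoint entry supplies the rest of the per-channel configuration.
 */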
2574static int udma_of_xlate(struct dma *dma, struct ofnode_phandle_args *args)
2575{
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302576 struct udma_chan_config *ucc;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302577 struct udma_dev *ud = dev_get_priv(dma->dev);
2578 struct udma_chan *uc = &ud->channels[0];
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302579 struct psil_endpoint_config *ep_config;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302580 u32 val;
2581
2582 for (val = 0; val < ud->ch_count; val++) {
2583 uc = &ud->channels[val];
2584 if (!uc->in_use)
2585 break;
2586 }
2587
2588 if (val == ud->ch_count)
2589 return -EBUSY;
2590
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302591 ucc = &uc->config;
2592 ucc->remote_thread_id = args->args[0];
2593 if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
2594 ucc->dir = DMA_MEM_TO_DEV;
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302595 else
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302596 ucc->dir = DMA_DEV_TO_MEM;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302597
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302598 ep_config = psil_get_ep_config(ucc->remote_thread_id);
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302599 if (IS_ERR(ep_config)) {
2600 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302601 uc->config.remote_thread_id);
2602 ucc->dir = DMA_MEM_TO_MEM;
2603 ucc->remote_thread_id = -1;
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302604 return false;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302605 }
2606
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302607 ucc->pkt_mode = ep_config->pkt_mode;
2608 ucc->channel_tpl = ep_config->channel_tpl;
2609 ucc->notdpkt = ep_config->notdpkt;
2610 ucc->ep_type = ep_config->ep_type;
Vignesh Rffcc66e2019-02-05 17:31:24 +05302611
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302612 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
2613 ep_config->mapped_channel_id >= 0) {
2614 ucc->mapped_channel_id = ep_config->mapped_channel_id;
2615 ucc->default_flow_id = ep_config->default_flow_id;
2616 } else {
2617 ucc->mapped_channel_id = -1;
2618 ucc->default_flow_id = -1;
2619 }
2620
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302621 ucc->needs_epib = ep_config->needs_epib;
2622 ucc->psd_size = ep_config->psd_size;
2623 ucc->metadata_size = (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + ucc->psd_size;
2624
2625 ucc->hdesc_size = cppi5_hdesc_calc_size(ucc->needs_epib,
2626 ucc->psd_size, 0);
2627 ucc->hdesc_size = ALIGN(ucc->hdesc_size, ARCH_DMA_MINALIGN);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302628
2629 dma->id = uc->id;
2630 pr_debug("Allocated dma chn:%lu epib:%d psdata:%u meta:%u thread_id:%x\n",
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302631 dma->id, ucc->needs_epib,
2632 ucc->psd_size, ucc->metadata_size,
2633 ucc->remote_thread_id);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302634
2635 return 0;
2636}
2637
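/*
 * Queue an RX buffer: initialise the next of the UDMA_RX_DESC_NUM host
 * descriptors, attach the buffer, flush the descriptor and push it to the
 * free-descriptor ring.
 */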
2638int udma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
2639{
2640 struct udma_dev *ud = dev_get_priv(dma->dev);
2641 struct cppi5_host_desc_t *desc_rx;
2642 dma_addr_t dma_dst;
2643 struct udma_chan *uc;
2644 u32 desc_num;
2645
2646 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2647 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2648 return -EINVAL;
2649 }
2650 uc = &ud->channels[dma->id];
2651
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302652 if (uc->config.dir != DMA_DEV_TO_MEM)
Vignesh Rffcc66e2019-02-05 17:31:24 +05302653 return -EINVAL;
2654
2655 if (uc->num_rx_bufs >= UDMA_RX_DESC_NUM)
2656 return -EINVAL;
2657
2658 desc_num = uc->desc_rx_cur % UDMA_RX_DESC_NUM;
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302659 desc_rx = uc->desc_rx + (desc_num * uc->config.hdesc_size);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302660 dma_dst = (dma_addr_t)dst;
2661
2662 cppi5_hdesc_reset_hbdesc(desc_rx);
2663
2664 cppi5_hdesc_init(desc_rx,
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302665 uc->config.needs_epib ? CPPI5_INFO0_HDESC_EPIB_PRESENT : 0,
2666 uc->config.psd_size);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302667 cppi5_hdesc_set_pktlen(desc_rx, size);
2668 cppi5_hdesc_attach_buf(desc_rx, dma_dst, size, dma_dst, size);
2669
Vignesh Raghavendraf03cb5c2019-12-09 10:25:39 +05302670 flush_dcache_range((unsigned long)desc_rx,
Vignesh Raghavendraaf374c22020-07-06 13:26:25 +05302671 ALIGN((unsigned long)desc_rx + uc->config.hdesc_size,
Vignesh Raghavendrac0b94902019-12-09 10:25:35 +05302672 ARCH_DMA_MINALIGN));
Vignesh Rffcc66e2019-02-05 17:31:24 +05302673
Vignesh Raghavendra7be51212020-07-06 13:26:26 +05302674 udma_push_to_ring(uc->rflow->fd_ring, desc_rx);
Vignesh Rffcc66e2019-02-05 17:31:24 +05302675
2676 uc->num_rx_bufs++;
2677 uc->desc_rx_cur++;
2678
2679 return 0;
2680}
2681
Vignesh Raghavendra5e6d9cc2019-12-04 22:17:21 +05302682static int udma_get_cfg(struct dma *dma, u32 id, void **data)
2683{
2684 struct udma_dev *ud = dev_get_priv(dma->dev);
2685 struct udma_chan *uc;
2686
2687 if (dma->id >= (ud->rchan_cnt + ud->tchan_cnt)) {
2688 dev_err(dma->dev, "invalid dma ch_id %lu\n", dma->id);
2689 return -EINVAL;
2690 }
2691
2692 switch (id) {
2693 case TI_UDMA_CHAN_PRIV_INFO:
2694 uc = &ud->channels[dma->id];
2695 *data = &uc->cfg_data;
2696 return 0;
2697 }
2698
2699 return -EINVAL;
2700}
2701
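/*
 * Illustrative sketch (not part of this driver) of how a client such as an
 * Ethernet driver typically drives these ops through the DMA uclass,
 * assuming its DT node references "tx" and "rx" dma channels; the field
 * values in pdata are placeholders, real clients take them from their own
 * hardware requirements:
 *
 *	struct dma tx, rx;
 *	struct ti_udma_drv_packet_data pdata = { .pkt_type = 0x7, .dest_tag = 0 };
 *	void *pkt;
 *	int len;
 *
 *	dma_get_by_name(dev, "tx", &tx);
 *	dma_get_by_name(dev, "rx", &rx);
 *	dma_prepare_rcv_buf(&rx, rx_buf, PKTSIZE_ALIGN);
 *	dma_enable(&rx);
 *	dma_enable(&tx);
 *	dma_send(&tx, tx_buf, len, &pdata);	// blocks until completion
 *	len = dma_receive(&rx, &pkt, NULL);	// 0 means nothing pending
 */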
Vignesh Rffcc66e2019-02-05 17:31:24 +05302702static const struct dma_ops udma_ops = {
2703 .transfer = udma_transfer,
2704 .of_xlate = udma_of_xlate,
2705 .request = udma_request,
Simon Glassaae95882020-02-03 07:35:55 -07002706 .rfree = udma_rfree,
Vignesh Rffcc66e2019-02-05 17:31:24 +05302707 .enable = udma_enable,
2708 .disable = udma_disable,
2709 .send = udma_send,
2710 .receive = udma_receive,
2711 .prepare_rcv_buf = udma_prepare_rcv_buf,
Vignesh Raghavendra5e6d9cc2019-12-04 22:17:21 +05302712 .get_cfg = udma_get_cfg,
Vignesh Rffcc66e2019-02-05 17:31:24 +05302713};
2714
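/*
 * Per-SoC match data: psil_base is the PSI-L thread id of channel 0 of
 * this DMA instance (host-side threads are psil_base + channel id), and
 * level_start_idx holds the first channel index of each throughput level.
 */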
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302715static struct udma_match_data am654_main_data = {
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302716 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302717 .psil_base = 0x1000,
2718 .enable_memcpy_support = true,
2719 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302720 .oes = {
2721 .udma_rchan = 0x200,
2722 },
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302723 .tpl_levels = 2,
2724 .level_start_idx = {
2725 [0] = 8, /* Normal channels */
2726 [1] = 0, /* High Throughput channels */
2727 },
2728};
2729
2730static struct udma_match_data am654_mcu_data = {
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302731 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302732 .psil_base = 0x6000,
2733 .enable_memcpy_support = true,
2734 .statictr_z_mask = GENMASK(11, 0),
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302735 .oes = {
2736 .udma_rchan = 0x200,
2737 },
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302738 .tpl_levels = 2,
2739 .level_start_idx = {
2740 [0] = 2, /* Normal channels */
2741 [1] = 0, /* High Throughput channels */
2742 },
2743};
2744
2745static struct udma_match_data j721e_main_data = {
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302746 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302747 .psil_base = 0x1000,
2748 .enable_memcpy_support = true,
2749 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2750 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302751 .oes = {
2752 .udma_rchan = 0x400,
2753 },
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302754 .tpl_levels = 3,
2755 .level_start_idx = {
2756 [0] = 16, /* Normal channels */
2757 [1] = 4, /* High Throughput channels */
2758 [2] = 0, /* Ultra High Throughput channels */
2759 },
2760};
2761
2762static struct udma_match_data j721e_mcu_data = {
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302763 .type = DMA_TYPE_UDMA,
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302764 .psil_base = 0x6000,
2765 .enable_memcpy_support = true,
2766 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2767 .statictr_z_mask = GENMASK(23, 0),
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302768 .oes = {
2769 .udma_rchan = 0x400,
2770 },
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302771 .tpl_levels = 2,
2772 .level_start_idx = {
2773 [0] = 2, /* Normal channels */
2774 [1] = 0, /* High Throughput channels */
2775 },
2776};
2777
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302778static struct udma_match_data am64_bcdma_data = {
2779 .type = DMA_TYPE_BCDMA,
2780 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
2781 .enable_memcpy_support = true, /* Supported via bchan */
2782 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2783 .statictr_z_mask = GENMASK(23, 0),
2784 .oes = {
2785 .bcdma_bchan_data = 0x2200,
2786 .bcdma_bchan_ring = 0x2400,
2787 .bcdma_tchan_data = 0x2800,
2788 .bcdma_tchan_ring = 0x2a00,
2789 .bcdma_rchan_data = 0x2e00,
2790 .bcdma_rchan_ring = 0x3000,
2791 },
2792 /* No throughput levels */
2793};
2794
2795static struct udma_match_data am64_pktdma_data = {
2796 .type = DMA_TYPE_PKTDMA,
2797 .psil_base = 0x1000,
2798 .enable_memcpy_support = false,
2799 .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
2800 .statictr_z_mask = GENMASK(23, 0),
2801 .oes = {
2802 .pktdma_tchan_flow = 0x1200,
2803 .pktdma_rchan_flow = 0x1600,
2804 },
2805 /* No throughput levels */
2806};
2807
Vignesh Rffcc66e2019-02-05 17:31:24 +05302808static const struct udevice_id udma_ids[] = {
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302809 {
2810 .compatible = "ti,am654-navss-main-udmap",
2811 .data = (ulong)&am654_main_data,
2812 },
2813 {
2814 .compatible = "ti,am654-navss-mcu-udmap",
2815 .data = (ulong)&am654_mcu_data,
2816 }, {
2817 .compatible = "ti,j721e-navss-main-udmap",
2818 .data = (ulong)&j721e_main_data,
2819 }, {
2820 .compatible = "ti,j721e-navss-mcu-udmap",
2821 .data = (ulong)&j721e_mcu_data,
2822 },
Vignesh Raghavendra9a928512021-05-10 20:06:08 +05302823 {
2824 .compatible = "ti,am64-dmss-bcdma",
2825 .data = (ulong)&am64_bcdma_data,
2826 },
2827 {
2828 .compatible = "ti,am64-dmss-pktdma",
2829 .data = (ulong)&am64_pktdma_data,
2830 },
Vignesh Raghavendra5c92fff2020-07-07 13:43:34 +05302831 { /* Sentinel */ },
Vignesh Rffcc66e2019-02-05 17:31:24 +05302832};
2833
2834U_BOOT_DRIVER(ti_edma3) = {
2835 .name = "ti-udma",
2836 .id = UCLASS_DMA,
2837 .of_match = udma_ids,
2838 .ops = &udma_ops,
2839 .probe = udma_probe,
Simon Glass41575d82020-12-03 16:55:17 -07002840 .priv_auto = sizeof(struct udma_dev),
Vignesh Rffcc66e2019-02-05 17:31:24 +05302841};