blob: c5099ad084d59c093cf51ea2aa48674f9251cf29 [file] [log] [blame]
Grygorii Strashko432f66f2019-02-05 17:31:22 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
6 */
7
8#include <common.h>
Vignesh Raghavendra9d32a942019-12-09 10:25:33 +05309#include <cpu_func.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060010#include <log.h>
Simon Glass90526e92020-05-10 11:39:56 -060011#include <asm/cache.h>
Grygorii Strashko432f66f2019-02-05 17:31:22 +053012#include <asm/io.h>
13#include <malloc.h>
Grygorii Strashko432f66f2019-02-05 17:31:22 +053014#include <asm/bitops.h>
15#include <dm.h>
Simon Glass336d4612020-02-03 07:36:16 -070016#include <dm/device_compat.h>
Simon Glass61b29b82020-02-03 07:36:15 -070017#include <dm/devres.h>
Grygorii Strashko432f66f2019-02-05 17:31:22 +053018#include <dm/read.h>
19#include <dm/uclass.h>
Simon Glasscd93d622020-05-10 11:40:13 -060020#include <linux/bitops.h>
Grygorii Strashko432f66f2019-02-05 17:31:22 +053021#include <linux/compat.h>
Masahiro Yamada9d86b892020-02-14 16:40:19 +090022#include <linux/dma-mapping.h>
Simon Glass61b29b82020-02-03 07:36:15 -070023#include <linux/err.h>
Grygorii Strashko432f66f2019-02-05 17:31:22 +053024#include <linux/soc/ti/k3-navss-ringacc.h>
25#include <linux/soc/ti/ti_sci_protocol.h>
26
/*
 * Linux-compat shims: this driver is shared with the Linux k3-ringacc
 * driver, so map the Linux-style bitops and coherent-DMA helpers onto
 * their U-Boot equivalents.
 */
#define set_bit(bit, bitmap)	__set_bit(bit, bitmap)
#define clear_bit(bit, bitmap)	__clear_bit(bit, bitmap)
/* U-Boot's dma_free_coherent() only needs the CPU address; drop the rest */
#define dma_free_coherent(dev, size, cpu_addr, dma_handle) \
	dma_free_coherent(cpu_addr)
/*
 * Emulate Linux dma_zalloc_coherent(): allocate coherent memory via
 * U-Boot's dma_alloc_coherent() and zero it before returning.  The
 * statement expression yields the virtual address (NULL on failure).
 */
#define dma_zalloc_coherent(dev, size, dma_handle, flag) \
({ \
	void	*ring_mem_virt; \
	ring_mem_virt = dma_alloc_coherent((size), \
					   (unsigned long *)(dma_handle)); \
	if (ring_mem_virt) \
		memset(ring_mem_virt, 0, (size)); \
	ring_mem_virt; \
})
40
/* Global list of all ring accelerator instances registered in the system */
static	LIST_HEAD(k3_nav_ringacc_list);

/* 32-bit MMIO write with a debug trace of value and register address */
static	void ringacc_writel(u32 v, void __iomem *reg)
{
	pr_debug("WRITEL(32): v(%08X)-->reg(%p)\n", v, reg);
	writel(v, reg);
}
48
/* 32-bit MMIO read with a debug trace of value and register address */
static	u32 ringacc_readl(void __iomem *reg)
{
	u32	v;

	v = readl(reg);
	pr_debug("READL(32): v(%08X)<--reg(%p)\n", v, reg);
	return v;
}
57
/* Element-count field of the ring size config word (bits 19:0) */
#define KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK		GENMASK(19, 0)

/**
 * struct k3_nav_ring_rt_regs - The RA Control/Status Registers region
 *
 * Per-ring realtime register block, laid out to match the hardware;
 * one block per ring, spaced KNAV_RINGACC_RT_REGS_STEP apart.
 */
struct k3_nav_ring_rt_regs {
	u32	resv_16[4];
	u32	db;		/* RT Ring N Doorbell Register */
	u32	resv_4[1];
	u32	occ;		/* RT Ring N Occupancy Register */
	u32	indx;		/* RT Ring N Current Index Register */
	u32	hwocc;		/* RT Ring N Hardware Occupancy Register */
	u32	hwindx;		/* RT Ring N Current Index Register */
};

/* Address stride between consecutive per-ring RT register blocks */
#define KNAV_RINGACC_RT_REGS_STEP	0x1000
74
/**
 * struct k3_nav_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * Each data window is 128 u32 words (512 bytes,
 * KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES); elements are accessed at an
 * offset computed by k3_nav_ringacc_ring_get_fifo_pos().
 */
struct k3_nav_ring_fifo_regs {
	u32	head_data[128];		/* Ring Head Entry Data Registers */
	u32	tail_data[128];		/* Ring Tail Entry Data Registers */
	u32	peek_head_data[128];	/* Ring Peek Head Entry Data Regs */
	u32	peek_tail_data[128];	/* Ring Peek Tail Entry Data Regs */
};
84
/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @config holds the number of proxy threads in its low 16 bits
 * (K3_RINGACC_PROXY_CFG_THREADS_MASK).
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;	/* Revision Register */
	u32	config;		/* Config Register */
};

/* Number-of-proxy-threads field in the proxy global config register */
#define K3_RINGACC_PROXY_CFG_THREADS_MASK		GENMASK(15, 0)
94
/**
 * struct k3_ringacc_proxy_target_regs - RA Proxy Datapath MMIO Region
 *
 * One block per proxy thread, spaced K3_RINGACC_PROXY_TARGET_STEP apart.
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;	/* Proxy Control Register */
	u32	status;		/* Proxy Status Register */
	u8	resv_512[504];
	u32	data[128];	/* Proxy Data Register */
};

/* Address stride between consecutive proxy target register blocks */
#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
/* Sentinel proxy_id: ring has no proxy thread assigned */
#define K3_RINGACC_PROXY_NOT_USED	(-1)

/* Access modes programmed into the proxy control register (bits 17:16) */
enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

/* Size in bytes of one FIFO data window (128 u32 words) */
#define KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES  (512U)
/* Address stride between consecutive per-ring FIFO register blocks */
#define KNAV_RINGACC_FIFO_REGS_STEP	0x1000
/* Maximum count that can be written to the doorbell in one access */
#define KNAV_RINGACC_MAX_DB_RING_CNT    (127U)
118
/**
 * struct k3_nav_ring_ops - Ring operations
 *
 * @push_tail: append an element at the ring tail
 * @push_head: insert an element at the ring head
 * @pop_tail: remove an element from the ring tail
 * @pop_head: remove an element from the ring head
 *
 * The set of ops installed depends on the ring mode (ring/message/proxy);
 * unsupported operations are left NULL.
 */
struct k3_nav_ring_ops {
	int (*push_tail)(struct k3_nav_ring *ring, void *elm);
	int (*push_head)(struct k3_nav_ring *ring, void *elm);
	int (*pop_tail)(struct k3_nav_ring *ring, void *elm);
	int (*pop_head)(struct k3_nav_ring *ring, void *elm);
};
128
/**
 * struct k3_nav_ring_state - Internal state tracking structure
 *
 * @free: Number of free entries
 * @occ: Occupancy
 * @windex: Write index
 * @rindex: Read index
 * @tdown_complete: set when a teardown completion was observed; lets
 *                  k3_nav_ringacc_ring_pop() proceed even at occ == 0
 */
struct k3_nav_ring_state {
	u32 free;
	u32 occ;
	u32 windex;
	u32 rindex;
	u32 tdown_complete:1;
};
144
/**
 * struct k3_nav_ring - RA Ring descriptor
 *
 * @rt - Ring control/status registers
 * @fifos - Ring queues registers
 * @proxy - Ring Proxy Datapath registers
 * @ring_mem_dma - Ring buffer dma address
 * @ring_mem_virt - Ring buffer virt address
 * @ops - Ring operations
 * @size - Ring size in elements
 * @elm_size - Size of the ring element
 * @mode - Ring mode
 * @flags - flags
 * @state - Cached SW view of occupancy/free counts and read/write indexes
 * @ring_id - Ring Id
 * @parent - Pointer on struct @k3_nav_ringacc
 * @use_count - Use count for shared rings
 * @proxy_id - RA Ring Proxy Id (only if @K3_NAV_RINGACC_RING_USE_PROXY)
 */
struct k3_nav_ring {
	struct k3_nav_ring_rt_regs __iomem *rt;
	struct k3_nav_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs  __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_nav_ring_ops *ops;
	u32		size;
	enum k3_nav_ring_size elm_size;
	enum k3_nav_ring_mode mode;
	u32		flags;
#define KNAV_RING_FLAG_BUSY	BIT(1)	/* ring configured and in use */
#define K3_NAV_RING_FLAG_SHARED	BIT(2)	/* ring may be requested by many users */
	struct k3_nav_ring_state state;
	u32		ring_id;
	struct k3_nav_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
};
182
/* SoC-specific hooks; @init performs the per-SoC probe-time setup */
struct k3_nav_ringacc_ops {
	int (*init)(struct udevice *dev, struct k3_nav_ringacc *ringacc);
};
186
/**
 * struct k3_nav_ringacc - Rings accelerator descriptor
 *
 * @dev - pointer on RA device
 * @proxy_gcfg - RA proxy global config registers
 * @proxy_target_base - RA proxy datapath region
 * @num_rings - number of ring in RA
 * @rings_inuse - bitmap of rings already requested
 * @rm_gp_range - general purpose rings range from tisci
 * @dma_ring_reset_quirk - DMA reset w/a enable
 * @num_proxies - number of RA proxies
 * @proxy_inuse - bitmap of proxy threads already assigned to rings
 * @rings - array of rings descriptors (struct @k3_nav_ring)
 * @list - list of RAs in the system
 * @tisci - pointer ti-sci handle
 * @tisci_ring_ops - ti-sci rings ops
 * @tisci_dev_id - ti-sci device id
 * @ops: SoC specific ringacc operation
 */
struct k3_nav_ringacc {
	struct udevice *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;
	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	struct k3_nav_ring *rings;
	struct list_head list;

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;

	const struct k3_nav_ringacc_ops *ops;
};
224
225static long k3_nav_ringacc_ring_get_fifo_pos(struct k3_nav_ring *ring)
226{
227 return KNAV_RINGACC_FIFO_WINDOW_SIZE_BYTES -
228 (4 << ring->elm_size);
229}
230
231static void *k3_nav_ringacc_get_elm_addr(struct k3_nav_ring *ring, u32 idx)
232{
233 return (idx * (4 << ring->elm_size) + ring->ring_mem_virt);
234}
235
static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem);

/* RING mode: SW reads/writes the ring memory directly (push/pop only) */
static struct k3_nav_ring_ops k3_nav_mode_ring_ops = {
		.push_tail = k3_nav_ringacc_ring_push_mem,
		.pop_head = k3_nav_ringacc_ring_pop_mem,
};

static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem);
static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
					    void *elem);
static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring,
					   void *elem);

/* MESSAGE mode without a proxy: access through the per-ring FIFO windows */
static struct k3_nav_ring_ops k3_nav_mode_msg_ops = {
		.push_tail = k3_nav_ringacc_ring_push_io,
		.push_head = k3_nav_ringacc_ring_push_head_io,
		.pop_tail = k3_nav_ringacc_ring_pop_tail_io,
		.pop_head = k3_nav_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring,
					   void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring,
					   void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem);

/* MESSAGE mode with a proxy thread: access through the proxy datapath */
static struct k3_nav_ring_ops k3_nav_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
};
271
272struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc)
273{
274 return ringacc->dev;
275}
276
/*
 * Reserve a ring for use.
 *
 * @id is either a specific ring index or K3_NAV_RINGACC_RING_ID_ANY, in
 * which case any free general purpose ring from the TISCI-granted range
 * is picked.  Rings flagged K3_NAV_RING_FLAG_SHARED may be requested by
 * multiple users; their use count is bumped instead of failing.  If
 * K3_NAV_RINGACC_RING_USE_PROXY is set in @flags, a free proxy thread is
 * also claimed for the ring.
 *
 * Return: the ring descriptor, or NULL if no ring/proxy is available.
 */
struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc,
						int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	if (id == K3_NAV_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
						&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse,
					size, gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	/* Busy non-shared rings can't be handed out twice */
	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_NAV_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_NAV_RINGACC_RING_USE_PROXY) {
		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
					      ringacc->num_proxies, 0);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (!try_module_get(ringacc->dev->driver->owner))
		goto error;

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		pr_debug("Giving ring#%d proxy#%d\n",
			 id, proxy_id);
	} else {
		pr_debug("Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	return &ringacc->rings[id];

error:
	return NULL;
}
330
Vignesh Raghavendra5d257842020-07-06 13:26:23 +0530331int k3_nav_ringacc_request_rings_pair(struct k3_nav_ringacc *ringacc,
332 int fwd_id, int compl_id,
333 struct k3_nav_ring **fwd_ring,
334 struct k3_nav_ring **compl_ring)
335{
336 int ret = 0;
337
338 if (!fwd_ring || !compl_ring)
339 return -EINVAL;
340
341 *fwd_ring = k3_nav_ringacc_request_ring(ringacc, fwd_id, 0);
342 if (!(*fwd_ring))
343 return -ENODEV;
344
345 *compl_ring = k3_nav_ringacc_request_ring(ringacc, compl_id, 0);
346 if (!(*compl_ring)) {
347 k3_nav_ringacc_ring_free(*fwd_ring);
348 ret = -ENODEV;
349 }
350
351 return ret;
352}
353
/*
 * Ask TISCI to reset the ring by re-writing only its element count
 * (TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID); all other config fields are
 * passed as zero/ignored.  Failures are logged but not propagated.
 */
static void k3_ringacc_ring_reset_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			ring->size,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
374
375void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring)
376{
377 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
378 return;
379
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530380 memset(&ring->state, 0, sizeof(ring->state));
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530381
382 k3_ringacc_ring_reset_sci(ring);
383}
384
/*
 * Ask TISCI to switch only the ring's queue mode
 * (TI_SCI_MSG_VALUE_RM_RING_MODE_VALID); used by the DMA-reset
 * workaround to temporarily force ring/doorbell mode.  Failures are
 * logged but not propagated.
 */
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_nav_ring *ring,
					       enum k3_nav_ring_mode mode)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			mode,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
406
/*
 * Reset a ring used by a DMA channel, applying the UDMAP occupancy
 * workaround when the "ti,dma-ring-reset-quirk" is enabled: the internal
 * UDMAP occupancy counter (21 bits wide) is wrapped back to zero by
 * ringing the doorbell (2^22 - occ) times in ring/doorbell mode before
 * the final reset.
 *
 * @ring: ring to reset (ignored unless configured/busy)
 * @occ: known occupancy; when 0 the current value is read from hardware
 */
void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
		return;

	/* Without the quirk a plain ring reset is sufficient */
	if (!ring->parent->dma_ring_reset_quirk) {
		k3_nav_ringacc_ring_reset(ring);
		return;
	}

	if (!occ)
		occ = ringacc_readl(&ring->rt->occ);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		pr_debug("%s %u occ: %u\n", __func__,
			 ring->ring_id, occ);
		/* 2. Reset the ring */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * 3. Setup the ring in ring/doorbell mode
		 * (if not already in this mode)
		 */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_NAV_RINGACC_RING_MODE_RING);
		/*
		 * 4. Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;

		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * of writes
			 */
			if (db_ring_cnt > KNAV_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = KNAV_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* 5. Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_NAV_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

	/* Final reset; also clears the driver's cached ring state */
	k3_nav_ringacc_ring_reset(ring);
}
465
/*
 * Ask TISCI to clear the whole ring configuration (all-fields valid,
 * everything zeroed).  Failures are logged but not propagated.
 */
static void k3_ringacc_ring_free_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}
486
/*
 * Release a ring obtained with k3_nav_ringacc_request_ring().
 *
 * Drops one use count; when the last user releases a configured ring,
 * the TISCI configuration is cleared, the coherent ring memory is freed
 * and the ring (plus any proxy thread) is returned to the free pools.
 *
 * Return: 0 on success, -EINVAL if @ring is NULL or not marked in-use.
 */
int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	pr_debug("%s flags: 0x%08x\n", __func__, ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	/* Shared rings: only the last user tears the ring down */
	if (--ring->use_count)
		goto out;

	/* Requested but never configured: just return it to the pool */
	if (!(ring->flags & KNAV_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags &= ~KNAV_RING_FLAG_BUSY;
	ring->ops = NULL;
	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

	module_put(ringacc->dev->driver->owner);

out:
	return 0;
}
528
529u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring)
530{
531 if (!ring)
532 return -EINVAL;
533
534 return ring->ring_id;
535}
536
/*
 * Push the full ring configuration (base address, size, mode, element
 * size) to hardware through the TISCI ring config call.
 *
 * Return: 0 on success, -EINVAL without a TISCI handle, or the TISCI
 * error code.
 */
static int k3_nav_ringacc_ring_cfg_sci(struct k3_nav_ring *ring)
{
	struct k3_nav_ringacc *ringacc = ring->parent;
	u32 ring_idx;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_idx = ring->ring_id;
	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring_idx,
			lower_32_bits(ring->ring_mem_dma),
			upper_32_bits(ring->ring_mem_dma),
			ring->size,
			ring->mode,
			ring->elm_size,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring_idx);

	return ret;
}
564
565int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring,
566 struct k3_nav_ring_cfg *cfg)
567{
568 struct k3_nav_ringacc *ringacc = ring->parent;
569 int ret = 0;
570
571 if (!ring || !cfg)
572 return -EINVAL;
573 if (cfg->elm_size > K3_NAV_RINGACC_RING_ELSIZE_256 ||
574 cfg->mode > K3_NAV_RINGACC_RING_MODE_QM ||
575 cfg->size & ~KNAV_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
576 !test_bit(ring->ring_id, ringacc->rings_inuse))
577 return -EINVAL;
578
579 if (ring->use_count != 1)
580 return 0;
581
582 ring->size = cfg->size;
583 ring->elm_size = cfg->elm_size;
584 ring->mode = cfg->mode;
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530585 memset(&ring->state, 0, sizeof(ring->state));
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530586
587 if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
588 ring->proxy = ringacc->proxy_target_base +
589 ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;
590
591 switch (ring->mode) {
592 case K3_NAV_RINGACC_RING_MODE_RING:
593 ring->ops = &k3_nav_mode_ring_ops;
594 break;
595 case K3_NAV_RINGACC_RING_MODE_QM:
596 /*
597 * In Queue mode elm_size can be 8 only and each operation
598 * uses 2 element slots
599 */
600 if (cfg->elm_size != K3_NAV_RINGACC_RING_ELSIZE_8 ||
601 cfg->size % 2)
602 goto err_free_proxy;
603 case K3_NAV_RINGACC_RING_MODE_MESSAGE:
604 if (ring->proxy)
605 ring->ops = &k3_nav_mode_proxy_ops;
606 else
607 ring->ops = &k3_nav_mode_msg_ops;
608 break;
609 default:
610 ring->ops = NULL;
611 ret = -EINVAL;
612 goto err_free_proxy;
613 };
614
615 ring->ring_mem_virt =
616 dma_zalloc_coherent(ringacc->dev,
617 ring->size * (4 << ring->elm_size),
618 &ring->ring_mem_dma, GFP_KERNEL);
619 if (!ring->ring_mem_virt) {
620 dev_err(ringacc->dev, "Failed to alloc ring mem\n");
621 ret = -ENOMEM;
622 goto err_free_ops;
623 }
624
625 ret = k3_nav_ringacc_ring_cfg_sci(ring);
626
627 if (ret)
628 goto err_free_mem;
629
630 ring->flags |= KNAV_RING_FLAG_BUSY;
631 ring->flags |= (cfg->flags & K3_NAV_RINGACC_RING_SHARED) ?
632 K3_NAV_RING_FLAG_SHARED : 0;
633
634 return 0;
635
636err_free_mem:
637 dma_free_coherent(ringacc->dev,
638 ring->size * (4 << ring->elm_size),
639 ring->ring_mem_virt,
640 ring->ring_mem_dma);
641err_free_ops:
642 ring->ops = NULL;
643err_free_proxy:
644 ring->proxy = NULL;
645 return ret;
646}
647
648u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring)
649{
650 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
651 return -EINVAL;
652
653 return ring->size;
654}
655
656u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring)
657{
658 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
659 return -EINVAL;
660
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530661 if (!ring->state.free)
662 ring->state.free = ring->size - ringacc_readl(&ring->rt->occ);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530663
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530664 return ring->state.free;
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530665}
666
667u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring)
668{
669 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
670 return -EINVAL;
671
672 return ringacc_readl(&ring->rt->occ);
673}
674
675u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring)
676{
677 return !k3_nav_ringacc_ring_get_free(ring);
678}
679
/* Logical access requested by the push/pop wrappers; mapped onto the
 * proxy/FIFO head or tail data windows by the access helpers.
 */
enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};
688
689static int k3_ringacc_ring_cfg_proxy(struct k3_nav_ring *ring,
690 enum k3_ringacc_proxy_access_mode mode)
691{
692 u32 val;
693
694 val = ring->ring_id;
695 val |= mode << 16;
696 val |= ring->elm_size << 24;
697 ringacc_writel(val, &ring->proxy->control);
698 return 0;
699}
700
/*
 * Push/pop one element through the ring's proxy datapath: configure the
 * proxy for head or tail access, then copy the element to/from the end
 * of the proxy data window and adjust the cached free/occ counters.
 *
 * Return: 0 on success, -EINVAL for an unsupported access mode.
 */
static int k3_nav_ringacc_ring_access_proxy(
			struct k3_nav_ring *ring, void *elem,
			enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	/* Select head or tail access in the proxy control register */
	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	/* Elements are accessed at the end of the data window */
	ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		pr_debug("proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		pr_debug("proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("proxy: free%d occ%d\n",
		 ring->state.free, ring->state.occ);
	return 0;
}
747
748static int k3_ringacc_ring_push_head_proxy(struct k3_nav_ring *ring, void *elem)
749{
750 return k3_nav_ringacc_ring_access_proxy(
751 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
752}
753
754static int k3_ringacc_ring_push_tail_proxy(struct k3_nav_ring *ring, void *elem)
755{
756 return k3_nav_ringacc_ring_access_proxy(
757 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
758}
759
760static int k3_ringacc_ring_pop_head_proxy(struct k3_nav_ring *ring, void *elem)
761{
762 return k3_nav_ringacc_ring_access_proxy(
763 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
764}
765
766static int k3_ringacc_ring_pop_tail_proxy(struct k3_nav_ring *ring, void *elem)
767{
768 return k3_nav_ringacc_ring_access_proxy(
769 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
770}
771
/*
 * Push/pop one element through the ring's FIFO data windows: pick the
 * head or tail window, then copy the element to/from the end of that
 * window and adjust the cached free/occ counters.
 *
 * Return: 0 on success, -EINVAL for an unsupported access mode.
 */
static int k3_nav_ringacc_ring_access_io(
		struct k3_nav_ring *ring, void *elem,
		enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	/* Elements are accessed at the end of the data window */
	ptr += k3_nav_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		pr_debug("memcpy_fromio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->state.occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		pr_debug("memcpy_toio(x): --> ptr(%p), mode:%d\n",
			 ptr, access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->state.free--;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("free%d index%d occ%d index%d\n",
		 ring->state.free, ring->state.windex, ring->state.occ, ring->state.rindex);
	return 0;
}
816
817static int k3_nav_ringacc_ring_push_head_io(struct k3_nav_ring *ring,
818 void *elem)
819{
820 return k3_nav_ringacc_ring_access_io(
821 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
822}
823
824static int k3_nav_ringacc_ring_push_io(struct k3_nav_ring *ring, void *elem)
825{
826 return k3_nav_ringacc_ring_access_io(
827 ring, elem, K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
828}
829
830static int k3_nav_ringacc_ring_pop_io(struct k3_nav_ring *ring, void *elem)
831{
832 return k3_nav_ringacc_ring_access_io(
833 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
834}
835
836static int k3_nav_ringacc_ring_pop_tail_io(struct k3_nav_ring *ring, void *elem)
837{
838 return k3_nav_ringacc_ring_access_io(
839 ring, elem, K3_RINGACC_ACCESS_MODE_POP_HEAD);
840}
841
/*
 * RING mode push: copy @elem into ring memory at the write index, flush
 * the data cache so the hardware sees it, then advance windex (with
 * wraparound) and ring the doorbell by +1.
 */
static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));

	/* Make the new element visible to hardware before the doorbell */
	flush_dcache_range((unsigned long)ring->ring_mem_virt,
			   ALIGN((unsigned long)ring->ring_mem_virt +
				 ring->size * (4 << ring->elm_size),
				 ARCH_DMA_MINALIGN));

	ring->state.windex = (ring->state.windex + 1) % ring->size;
	ring->state.free--;
	ringacc_writel(1, &ring->rt->db);

	pr_debug("ring_push_mem: free%d index%d\n",
		 ring->state.free, ring->state.windex);

	return 0;
}
864
/*
 * RING mode pop: invalidate the cached ring memory so the CPU sees the
 * hardware-written element, copy it out at the read index, then advance
 * rindex (with wraparound) and ring the doorbell by -1.
 */
static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->state.rindex);

	/* Discard stale cache lines before reading DMA-written data */
	invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
				ALIGN((unsigned long)ring->ring_mem_virt +
				      ring->size * (4 << ring->elm_size),
				      ARCH_DMA_MINALIGN));

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->state.rindex = (ring->state.rindex + 1) % ring->size;
	ring->state.occ--;
	ringacc_writel(-1, &ring->rt->db);

	pr_debug("ring_pop_mem: occ%d index%d pos_ptr%p\n",
		 ring->state.occ, ring->state.rindex, elem_ptr);
	return 0;
}
886
887int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem)
888{
889 int ret = -EOPNOTSUPP;
890
891 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
892 return -EINVAL;
893
894 pr_debug("ring_push%d: free%d index%d\n",
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530895 ring->ring_id, ring->state.free, ring->state.windex);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530896
897 if (k3_nav_ringacc_ring_is_full(ring))
898 return -ENOMEM;
899
900 if (ring->ops && ring->ops->push_tail)
901 ret = ring->ops->push_tail(ring, elem);
902
903 return ret;
904}
905
906int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem)
907{
908 int ret = -EOPNOTSUPP;
909
910 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
911 return -EINVAL;
912
913 pr_debug("ring_push_head: free%d index%d\n",
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530914 ring->state.free, ring->state.windex);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530915
916 if (k3_nav_ringacc_ring_is_full(ring))
917 return -ENOMEM;
918
919 if (ring->ops && ring->ops->push_head)
920 ret = ring->ops->push_head(ring, elem);
921
922 return ret;
923}
924
925int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem)
926{
927 int ret = -EOPNOTSUPP;
928
929 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
930 return -EINVAL;
931
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530932 if (!ring->state.occ)
933 ring->state.occ = k3_nav_ringacc_ring_get_occ(ring);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530934
935 pr_debug("ring_pop%d: occ%d index%d\n",
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530936 ring->ring_id, ring->state.occ, ring->state.rindex);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530937
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530938 if (!ring->state.occ && !ring->state.tdown_complete)
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530939 return -ENODATA;
940
941 if (ring->ops && ring->ops->pop_head)
942 ret = ring->ops->pop_head(ring, elem);
943
944 return ret;
945}
946
947int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem)
948{
949 int ret = -EOPNOTSUPP;
950
951 if (!ring || !(ring->flags & KNAV_RING_FLAG_BUSY))
952 return -EINVAL;
953
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530954 if (!ring->state.occ)
955 ring->state.occ = k3_nav_ringacc_ring_get_occ(ring);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530956
957 pr_debug("ring_pop_tail: occ%d index%d\n",
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530958 ring->state.occ, ring->state.rindex);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530959
Vignesh Raghavendradb08a1d2020-07-06 13:26:22 +0530960 if (!ring->state.occ)
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530961 return -ENODATA;
962
963 if (ring->ops && ring->ops->pop_tail)
964 ret = ring->ops->pop_tail(ring, elem);
965
966 return ret;
967}
968
969static int k3_nav_ringacc_probe_dt(struct k3_nav_ringacc *ringacc)
970{
971 struct udevice *dev = ringacc->dev;
972 struct udevice *tisci_dev = NULL;
973 int ret;
974
975 ringacc->num_rings = dev_read_u32_default(dev, "ti,num-rings", 0);
976 if (!ringacc->num_rings) {
977 dev_err(dev, "ti,num-rings read failure %d\n", ret);
978 return -EINVAL;
979 }
980
981 ringacc->dma_ring_reset_quirk =
982 dev_read_bool(dev, "ti,dma-ring-reset-quirk");
983
Vignesh Raghavendraec0aeac2019-12-09 10:25:34 +0530984 ret = uclass_get_device_by_phandle(UCLASS_FIRMWARE, dev,
985 "ti,sci", &tisci_dev);
Grygorii Strashko432f66f2019-02-05 17:31:22 +0530986 if (ret) {
987 pr_debug("TISCI RA RM get failed (%d)\n", ret);
988 ringacc->tisci = NULL;
989 return -ENODEV;
990 }
991 ringacc->tisci = (struct ti_sci_handle *)
992 (ti_sci_get_handle_from_sysfw(tisci_dev));
993
994 ret = dev_read_u32_default(dev, "ti,sci", 0);
995 if (!ret) {
996 dev_err(dev, "TISCI RA RM disabled\n");
997 ringacc->tisci = NULL;
998 return ret;
999 }
1000
1001 ret = dev_read_u32(dev, "ti,sci-dev-id", &ringacc->tisci_dev_id);
1002 if (ret) {
1003 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
1004 ringacc->tisci = NULL;
1005 return ret;
1006 }
1007
1008 ringacc->rm_gp_range = devm_ti_sci_get_of_resource(
1009 ringacc->tisci, dev,
1010 ringacc->tisci_dev_id,
1011 "ti,sci-rm-range-gp-rings");
1012 if (IS_ERR(ringacc->rm_gp_range))
1013 ret = PTR_ERR(ringacc->rm_gp_range);
1014
1015 return 0;
1016}
1017
/**
 * k3_nav_ringacc_init - map resources and initialize all ring descriptors
 * @dev: ringacc udevice
 * @ringacc: driver instance to populate
 *
 * Parses DT properties, maps the "rt", "fifos", "proxy_gcfg" and
 * "proxy_target" register regions, reads the proxy-thread count from the
 * proxy config register, allocates the per-ring table plus the in-use
 * bitmaps, fills in per-ring register pointers, and registers the
 * instance on the global k3_nav_ringacc_list.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int k3_nav_ringacc_init(struct udevice *dev, struct k3_nav_ringacc *ringacc)
{
	void __iomem *base_fifo, *base_rt;
	int ret, i;

	ret = k3_nav_ringacc_probe_dt(ringacc);
	if (ret)
		return ret;

	/* Per-ring realtime control/status registers. */
	base_rt = (uint32_t *)devfdt_get_addr_name(dev, "rt");
	pr_debug("rt %p\n", base_rt);
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	/* Per-ring FIFO access windows. */
	base_fifo = (uint32_t *)devfdt_get_addr_name(dev, "fifos");
	pr_debug("fifos %p\n", base_fifo);
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	ringacc->proxy_gcfg = (struct k3_ringacc_proxy_gcfg_regs __iomem *)
		devfdt_get_addr_name(dev, "proxy_gcfg");
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);
	ringacc->proxy_target_base =
		(struct k3_ringacc_proxy_gcfg_regs __iomem *)
		devfdt_get_addr_name(dev, "proxy_target");
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	/* Number of proxy threads is reported by the proxy config register. */
	ringacc->num_proxies = ringacc_readl(&ringacc->proxy_gcfg->config) &
			       K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_rings),
					    sizeof(unsigned long), GFP_KERNEL);
	ringacc->proxy_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_proxies),
					    sizeof(unsigned long), GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
		return -ENOMEM;

	/* Point each ring descriptor at its slice of the register regions. */
	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       KNAV_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  KNAV_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}
	dev_set_drvdata(dev, ringacc);

	/*
	 * NOTE(review): assumes probe_dt() left a valid tisci handle; one
	 * probe_dt() path returns 0 with tisci == NULL — confirm it is
	 * unreachable on supported DTs.
	 */
	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	list_add_tail(&ringacc->list, &k3_nav_ringacc_list);

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 ringacc_readl(&ringacc->proxy_gcfg->revision),
		 ringacc->num_proxies);
	return 0;
}
1091
/* Per-compatible match data: init hook invoked from probe(). */
struct ringacc_match_data {
	struct k3_nav_ringacc_ops ops;
};
1095
/* Match data for the plain (non-split) AM65x NAVSS ring accelerator. */
static struct ringacc_match_data k3_nav_ringacc_data = {
	.ops = {
		.init = k3_nav_ringacc_init,
	},
};
1101
/* DT compatibles handled by this driver, with their match data. */
static const struct udevice_id knav_ringacc_ids[] = {
	{ .compatible = "ti,am654-navss-ringacc", .data = (ulong)&k3_nav_ringacc_data, },
	{},
};
1106
Vignesh Raghavendrab3f95992020-07-06 13:26:24 +05301107static int k3_nav_ringacc_probe(struct udevice *dev)
1108{
1109 struct k3_nav_ringacc *ringacc;
1110 int ret;
1111 const struct ringacc_match_data *match_data;
1112
1113 match_data = (struct ringacc_match_data *)dev_get_driver_data(dev);
1114
1115 ringacc = dev_get_priv(dev);
1116 if (!ringacc)
1117 return -ENOMEM;
1118
1119 ringacc->dev = dev;
1120 ringacc->ops = &match_data->ops;
1121 ret = ringacc->ops->init(dev, ringacc);
1122 if (ret)
1123 return ret;
1124
1125 return 0;
1126}
1127
/* U-Boot driver-model registration; priv is allocated by the core. */
U_BOOT_DRIVER(k3_navss_ringacc) = {
	.name = "k3-navss-ringacc",
	.id = UCLASS_MISC,
	.of_match = knav_ringacc_ids,
	.probe = k3_nav_ringacc_probe,
	.priv_auto = sizeof(struct k3_nav_ringacc),
};