// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload the software
 * effort of servicing DMA interrupts.
 * SW prepares General Purpose Descriptors (GPD) and Buffer Descriptors
 * (BD) to link the data buffers, then triggers the QMU to send data to
 * the host / receive data from the device in one shot.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */

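/*
 * A sketch of the expected call order, pieced together from the
 * functions below (illustrative only; the gadget layer drives this):
 *
 *	mtu3_gpd_ring_alloc(mep);	// allocate the GPD ring
 *	mtu3_qmu_start(mep);		// program start address, set Q_START
 *	...
 *	mtu3_insert_gpd(mep, mreq);	// queue one request as a GPD
 *	mtu3_qmu_resume(mep);		// kick the queue
 *	...
 *	mtu3_qmu_isr(mtu);		// completes requests via qmu_done_tx/rx()
 *	...
 *	mtu3_qmu_stop(mep);
 *	mtu3_gpd_ring_free(mep);
 */
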
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#include "mtu3.h"

#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_IOC	BIT(7)

#define GPD_EXT_FLAG_ZLP	BIT(5)

#define DCACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE

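/*
 * Both cache helpers below round the range out to whole cache lines:
 * the start address is aligned down and (addr + len) is aligned up to
 * DCACHELINE_SIZE, since d-cache maintenance operates on full lines.
 */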
void mtu3_flush_cache(uintptr_t addr, u32 len)
{
	WARN_ON(!(void *)addr || len == 0);

	flush_dcache_range(addr & ~(DCACHELINE_SIZE - 1),
			   ALIGN(addr + len, DCACHELINE_SIZE));
}

void mtu3_inval_cache(uintptr_t addr, u32 len)
{
	WARN_ON(!(void *)addr || len == 0);

	invalidate_dcache_range(addr & ~(DCACHELINE_SIZE - 1),
				ALIGN(addr + len, DCACHELINE_SIZE));
}

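/*
 * The two helpers below translate between a GPD's position in the ring
 * (virtual pointer) and the DMA address the QMU hardware reports in its
 * registers; both return a "not found" value (NULL / 0) when the
 * address falls outside the ring's MAX_GPD_NUM entries.
 */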
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
				       dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
				  struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}

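/*
 * Reclaim the ring for software: clearing HWO on the first GPD ensures
 * the hardware no longer owns a descriptor before the ring pointers are
 * rewound to the start; the flush makes the cleared flag visible to the
 * QMU's DMA.
 */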
static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->flag &= ~GPD_FLAGS_HWO;
		gpd_ring_init(ring, gpd);
		mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));
	}
}

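/*
 * The ring is allocated cache-line aligned so the flush/invalidate
 * helpers never touch adjacent data. Note that ring->dma is simply the
 * virtual address cast to dma_addr_t, which assumes U-Boot's usual 1:1
 * physical/virtual mapping.
 */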
int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = memalign(DCACHELINE_SIZE, QMU_GPD_RING_SIZE);
	if (!gpd)
		return -ENOMEM;

	memset(gpd, 0, QMU_GPD_RING_SIZE);
	ring->dma = (dma_addr_t)gpd;
	gpd_ring_init(ring, gpd);
	mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));

	return 0;
}

void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	kfree(ring->start);
	memset(ring, 0, sizeof(*ring));
}

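/*
 * Kick the queue so it (re)fetches GPDs after new ones were linked in.
 * QMU_Q_RESUME is written a second time if the queue does not report
 * ACTIVE after the first write; the retry appears to be what the
 * hardware needs here (inferred from the code, not from documentation).
 */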
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}

static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}

static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}

/*
 * Despite its name, this checks whether the ring is FULL: one GPD is
 * reserved to simplify GPD preparation, so no more requests can be
 * queued once the slot after 'enqueue' has caught up with 'dequeue'.
 */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}

int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}

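/*
 * Fill the GPD at 'enqueue' for a TX (IN) transfer. The ordering below
 * matters: the next GPD has its HWO bit cleared and is linked in before
 * HWO is set on the current one, so the hardware never chases a stale
 * next_gpd pointer. The data buffer and both GPDs are flushed so the
 * QMU's DMA sees exactly what the CPU wrote.
 */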
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32((u32)req->dma);
	gpd->buf_len = cpu_to_le16(req->length);

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
		mep->epnum, gpd, enq);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
	mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd));

	if (req->zero)
		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

	gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	if (req->length)
		mtu3_flush_cache((uintptr_t)req->buf, req->length);

	mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));

	return 0;
}

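/*
 * The RX (OUT) variant mirrors the TX path, with two differences: the
 * length goes into data_buf_len (the buffer size the hardware may fill)
 * and the data buffer is invalidated rather than flushed, since the
 * device DMA writes it and the CPU reads it afterwards.
 */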
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;

	/* set all fields to zero as default value */
	memset(gpd, 0, sizeof(*gpd));

	gpd->buffer = cpu_to_le32((u32)req->dma);
	gpd->data_buf_len = cpu_to_le16(req->length);

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
		mep->epnum, gpd, enq);

	enq->flag &= ~GPD_FLAGS_HWO;
	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
	mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd));

	gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO;

	mreq->gpd = gpd;

	mtu3_inval_cache((uintptr_t)req->buf, req->length);
	mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));

	return 0;
}

void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}

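/*
 * Program the queue start address, enable the endpoint's DMA request
 * and the QMU error interrupts, then set Q_START. If the queue is
 * already active, only a warning is printed and the start is skipped.
 */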
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		mtu3_writel(mbase, USB_QMU_TQSAR(epnum), ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send a zero-length packet according to the ZLP flag in the GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			    QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		mtu3_writel(mbase, USB_QMU_RQSAR(epnum), ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to the next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			    QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}

/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	ret = readl_poll_timeout(mbase + qcsr, value,
				 !(value & QMU_Q_ACTIVE), 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}

void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}

/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
 * queue_tx --> process_tasklet (meanwhile, the second one is transferred,
 * and the tasklet processes both of them) --> qmu_interrupt for the second one.
 * To avoid the above case, call qmu_done_tx() directly in the ISR.
 */
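/*
 * Completion walk: starting at 'dequeue', requests are completed until
 * we reach the GPD the hardware is currently processing (read back from
 * the queue's current-pointer register) or hit a GPD the hardware still
 * owns (HWO set). Each GPD is invalidated before it is inspected so the
 * CPU sees the state written back by the hardware.
 */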
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/*
	 * translate the physical address read from the QMU register
	 * into a virtual address
	 */
	cur_gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
	mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
		mreq = next_request(mep);

		if (!mreq || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		req = &mreq->request;
		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
		mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);
	mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
		mreq = next_request(mep);

		if (!mreq || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
		mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}

static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}

static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR0, errval);
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				dev_err(mtu->dev, "Tx %d zlp error!\n", i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}

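/*
 * Top-level QMU interrupt handler. Both status registers are masked
 * with their corresponding enable registers so that only enabled
 * sources are serviced; the done status (U3D_QISAR0) is acked by
 * writing the handled bits back (W1C) before processing.
 */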
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read update */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}

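/*
 * The GPD layout is fixed by the hardware at 16 bytes, so the build is
 * made to fail if struct qmu_gpd ever deviates from QMU_GPD_SIZE.
 */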
void mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");
}

void mtu3_qmu_exit(struct mtu3 *mtu)
{
}