// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#ifndef __UBOOT__
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#else
#include <common.h>
#include <usb.h>
#include <linux/bug.h>
#include "linux-compat.h"
#include "usb-compat.h"
#endif

#include "musb_core.h"
#include "musb_host.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb *musb = ep->musb;
	void __iomem *epio = ep->regs;
	u16 csr;
	u16 lastcsr = 0;
	int retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

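/*
 * Scrub anything left in the ep0 FIFO (either direction), then clear CSR0
 * so the endpoint is ready for the next transfer.
 */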
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

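/*
 * Enable DMA for TX on this endpoint; CPPI additionally wants DMAMODE set.
 */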
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

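/*
 * Track which qh is bound to a hardware endpoint.  For shared-FIFO endpoints
 * a single qh serves both directions, so both pointers are updated.
 */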
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16 frame;
	u32 len;
	void __iomem *mbase = musb->mregs;
	struct urb *urb = next_urb(qh);
	void *buf = urb->transfer_buffer;
	u32 offset = 0;
	struct musb_hw_ep *hw_ep = qh->hw_ep;
	unsigned pipe = urb->pipe;
	u8 address = usb_pipedevice(pipe);
	int epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
#endif
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
			case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
#ifndef __UBOOT__
			case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
#endif
			default: s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
#endif
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
#ifndef __UBOOT__
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
#endif
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
#ifndef __UBOOT__
		}
#endif
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem *epio = qh->hw_ep->regs;
	u16 csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep *ep = qh->hw_ep;
	int ready = qh->is_ready;
	int status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
#endif
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head *head;
		struct dma_controller *dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

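/*
 * Flush the RX FIFO without letting it refill, and return the resulting
 * RXCSR value (the read-back also flushes the write buffer).
 */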
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;
	u32 length;
	int do_flush = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
#ifndef __UBOOT__
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
#endif
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
#ifndef __UBOOT__
	}
#endif

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16 csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

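/*
 * Program the TX DMA channel for this URB (Mentor mode 0/1 selection, or
 * CPPI/OMAP "RNDIS" mode).  Returns false if the channel could not be
 * programmed, in which case the caller falls back to PIO.
 */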
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 pkt_size = qh->maxpacket;
	u16 csr;
	u8 mode;

#ifdef CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller *dma_controller;
	struct dma_channel *dma_channel;
	u8 dma_ok;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
	u16 packet_sz = qh->maxpacket;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16 csr;
		u16 int_txe;
		u16 load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			else if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}


/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	struct musb_qh *qh = hw_ep->in_qh;
	struct usb_ctrlrequest *request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb *urb;
	u16 csr, len;
	int status = 0;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	bool complete = false;
	irqreturn_t retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int pipe;
	bool done = false;
	u16 tx_csr;
	size_t length = 0;
	size_t offset = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->out_qh;
	struct urb *urb = next_urb(qh);
	u32 status = 0;
	void __iomem *mbase = musb->mregs;
	struct dma_channel *dma;
	bool transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
#ifndef __UBOOT__
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
#endif
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|			- Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|			- if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *	(a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *	(b) termination conditions are: short RX, or buffer full;
 *	(c) fault modes include
 *	    - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *	      (and that endpoint's dma queue stops immediately)
 *	    - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel *dma;
	struct urb *urb;
	void __iomem *mbase = musb->mregs;
	void __iomem *epio = ep->regs;
	struct musb_qh *cur_qh, *next_qh;
	u16 rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb *urb;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	size_t xfer_len;
	void __iomem *mbase = musb->mregs;
	int pipe;
	u16 rx_csr, val;
	bool iso_err = false;
	bool done = false;
	u32 status;
	struct dma_channel *dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller *c;
			u16 rx_count;
			int ret, length;
			dma_addr_t buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb *musb,
	struct musb_qh *qh,
	int is_in)
{
	int idle;
	int best_diff;
	int best_end, epnum;
	struct musb_hw_ep *hw_ep = NULL;
	struct list_head *head = NULL;
	u8 toggle;
	u8 txtype;
	struct urb *urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
1861 /* use bulk reserved ep1 if no other ep is free */
1862 if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
1863 hw_ep = musb->bulk_ep;
1864 if (is_in)
1865 head = &musb->in_bulk;
1866 else
1867 head = &musb->out_bulk;
1868
1869		/* Enable the bulk RX NAK timeout scheme when bulk requests are
1870		 * multiplexed. This scheme doesn't work in the high-speed to
1871		 * full-speed scenario, as NAK interrupts do not arrive from a
1872		 * full-speed device connected to a high-speed device.
1873		 * The NAK timeout interval is 8 (128 uframes or 16 ms) for HS
1874		 * and 4 (8 frames or 8 ms) for FS devices.
1875		 */
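		/* For illustration: per the figures above, the bulk NAK limit
		 * uses the logarithmic encoding, i.e. a register value of n
		 * means 2^(n-1) (micro)frames, so 8 -> 128 uframes * 125 us =
		 * 16 ms at high speed, and 4 -> 8 frames * 1 ms = 8 ms at
		 * full speed.
		 */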
1876 if (is_in && qh->dev)
1877 qh->intv_reg =
1878 (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
1879 goto success;
1880 } else if (best_end < 0) {
1881 return -ENOSPC;
1882 }
1883
1884 idle = 1;
1885 qh->mux = 0;
1886 hw_ep = musb->endpoints + best_end;
1887 dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
1888success:
1889 if (head) {
1890 idle = list_empty(head);
1891 list_add_tail(&qh->ring, head);
1892 qh->mux = 1;
1893 }
1894 qh->hw_ep = hw_ep;
1895 qh->hep->hcpriv = qh;
1896 if (idle)
1897 musb_start_urb(musb, is_in, qh);
1898 return 0;
1899}
1900
1901#ifdef __UBOOT__
1902/* check if transaction translator is needed for device */
1903static int tt_needed(struct musb *musb, struct usb_device *dev)
1904{
1905 if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) &&
1906 (dev->speed < USB_SPEED_HIGH))
1907 return 1;
1908 return 0;
1909}
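/* Hypothetical usage sketch: with the controller running in high-speed mode
 * (MUSB_POWER_HSMODE set) and a full- or low-speed device attached, this
 * returns 1, and musb_urb_enqueue() below then programs the hub address and
 * port so the transfer is routed through the hub's transaction translator.
 */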
1910#endif
1911
1912#ifndef __UBOOT__
1913static int musb_urb_enqueue(
1914#else
1915int musb_urb_enqueue(
1916#endif
1917 struct usb_hcd *hcd,
1918 struct urb *urb,
1919 gfp_t mem_flags)
1920{
1921 unsigned long flags;
1922 struct musb *musb = hcd_to_musb(hcd);
1923 struct usb_host_endpoint *hep = urb->ep;
1924 struct musb_qh *qh;
1925 struct usb_endpoint_descriptor *epd = &hep->desc;
1926 int ret;
1927 unsigned type_reg;
1928 unsigned interval;
1929
1930 /* host role must be active */
1931 if (!is_host_active(musb) || !musb->is_active)
1932 return -ENODEV;
1933
1934 spin_lock_irqsave(&musb->lock, flags);
1935 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1936 qh = ret ? NULL : hep->hcpriv;
1937 if (qh)
1938 urb->hcpriv = qh;
1939 spin_unlock_irqrestore(&musb->lock, flags);
1940
1941 /* DMA mapping was already done, if needed, and this urb is on
1942 * hep->urb_list now ... so we're done, unless hep wasn't yet
1943 * scheduled onto a live qh.
1944 *
1945 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1946 * disabled, testing for empty qh->ring and avoiding qh setup costs
1947 * except for the first urb queued after a config change.
1948 */
1949 if (qh || ret)
1950 return ret;
1951
1952 /* Allocate and initialize qh, minimizing the work done each time
1953 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
1954 *
1955 * REVISIT consider a dedicated qh kmem_cache, so it's harder
1956 * for bugs in other kernel code to break this driver...
1957 */
1958 qh = kzalloc(sizeof *qh, mem_flags);
1959 if (!qh) {
1960 spin_lock_irqsave(&musb->lock, flags);
1961 usb_hcd_unlink_urb_from_ep(hcd, urb);
1962 spin_unlock_irqrestore(&musb->lock, flags);
1963 return -ENOMEM;
1964 }
1965
1966 qh->hep = hep;
1967 qh->dev = urb->dev;
1968 INIT_LIST_HEAD(&qh->ring);
1969 qh->is_ready = 1;
1970
1971 qh->maxpacket = usb_endpoint_maxp(epd);
1972 qh->type = usb_endpoint_type(epd);
1973
1974 /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
1975 * Some musb cores don't support high bandwidth ISO transfers; and
1976 * we don't (yet!) support high bandwidth interrupt transfers.
1977 */
1978 qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
1979 if (qh->hb_mult > 1) {
1980 int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
1981
1982 if (ok)
1983 ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
1984 || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
1985 if (!ok) {
1986 ret = -EMSGSIZE;
1987 goto done;
1988 }
1989 qh->maxpacket &= 0x7ff;
1990 }
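	/* Worked example of the decode above: a high-bandwidth ISO endpoint
	 * reporting wMaxPacketSize = 0x1400 gives hb_mult = 1 + 2 = 3
	 * transactions per microframe and maxpacket = 0x1400 & 0x7ff = 1024
	 * bytes, i.e. up to 3072 bytes per microframe.
	 */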
1991
1992 qh->epnum = usb_endpoint_num(epd);
1993
1994 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1995 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
1996
1997 /* precompute rxtype/txtype/type0 register */
1998 type_reg = (qh->type << 4) | qh->epnum;
1999 switch (urb->dev->speed) {
2000 case USB_SPEED_LOW:
2001 type_reg |= 0xc0;
2002 break;
2003 case USB_SPEED_FULL:
2004 type_reg |= 0x80;
2005 break;
2006 default:
2007 type_reg |= 0x40;
2008 }
2009 qh->type_reg = type_reg;
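	/* E.g. (assuming the usual descriptor type codes) a full-speed bulk
	 * endpoint 2 yields type_reg = (USB_ENDPOINT_XFER_BULK << 4) | 2 |
	 * 0x80 = 0xa2: speed code in bits 7:6, transfer type in bits 5:4,
	 * and the target endpoint number in bits 3:0.
	 */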
2010
2011 /* Precompute RXINTERVAL/TXINTERVAL register */
2012 switch (qh->type) {
2013 case USB_ENDPOINT_XFER_INT:
2014 /*
2015 * Full/low speeds use the linear encoding,
2016 * high speed uses the logarithmic encoding.
2017 */
2018 if (urb->dev->speed <= USB_SPEED_FULL) {
2019 interval = max_t(u8, epd->bInterval, 1);
2020 break;
2021 }
2022 /* FALLTHROUGH */
2023 case USB_ENDPOINT_XFER_ISOC:
2024 /* ISO always uses logarithmic encoding */
2025 interval = min_t(u8, epd->bInterval, 16);
2026 break;
2027 default:
2028 /* REVISIT we actually want to use NAK limits, hinting to the
2029 * transfer scheduling logic to try some other qh, e.g. try
2030 * for 2 msec first:
2031 *
2032 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2033 *
2034 * The downside of disabling this is that transfer scheduling
2035 * gets VERY unfair for nonperiodic transfers; a misbehaving
2036 * peripheral could make that hurt. That's perfectly normal
2037 * for reads from network or serial adapters ... so we have
2038 * partial NAKlimit support for bulk RX.
2039 *
2040 * The upside of disabling it is simpler transfer scheduling.
2041 */
2042 interval = 0;
2043 }
2044 qh->intv_reg = interval;
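	/* For illustration: a full-speed interrupt endpoint with
	 * bInterval = 0 is clamped to a 1 ms (1 frame) polling interval by
	 * the linear encoding, while a high-speed interrupt or ISO endpoint
	 * with bInterval = 4 uses the logarithmic encoding, i.e.
	 * 2^(4-1) = 8 (micro)frames.
	 */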
2045
2046 /* precompute addressing for external hub/tt ports */
2047 if (musb->is_multipoint) {
Hans de Goedee740ca32015-06-17 21:33:55 +02002048#ifndef __UBOOT__
Ilya Yanokeb819552012-11-06 13:48:21 +00002049 struct usb_device *parent = urb->dev->parent;
Hans de Goedee740ca32015-06-17 21:33:55 +02002050#else
2051 struct usb_device *parent = usb_dev_get_parent(urb->dev);
2052#endif
Ilya Yanokeb819552012-11-06 13:48:21 +00002053
2054#ifndef __UBOOT__
2055 if (parent != hcd->self.root_hub) {
2056#else
2057 if (parent) {
2058#endif
2059 qh->h_addr_reg = (u8) parent->devnum;
2060
2061#ifndef __UBOOT__
2062 /* set up tt info if needed */
2063 if (urb->dev->tt) {
2064 qh->h_port_reg = (u8) urb->dev->ttport;
2065 if (urb->dev->tt->hub)
2066 qh->h_addr_reg =
2067 (u8) urb->dev->tt->hub->devnum;
2068 if (urb->dev->tt->multi)
2069 qh->h_addr_reg |= 0x80;
2070 }
2071#else
2072 if (tt_needed(musb, urb->dev)) {
Stefan Brünsfaa7db22015-12-22 01:21:03 +01002073 uint8_t portnr = 0;
2074 uint8_t hubaddr = 0;
2075 usb_find_usb2_hub_address_port(urb->dev,
2076 &hubaddr,
2077 &portnr);
2078 qh->h_addr_reg = hubaddr;
Stefan Brünsac3abf02015-12-22 01:21:04 +01002079 qh->h_port_reg = portnr;
Ilya Yanokeb819552012-11-06 13:48:21 +00002080 }
2081#endif
2082 }
2083 }
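	/* E.g. (hypothetical topology) a full-speed device on port 2 of a
	 * high-speed hub at address 3 ends up with h_addr_reg = 3 and
	 * h_port_reg = 2, letting the core route the transfer through that
	 * hub's transaction translator.
	 */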
2084
2085 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
2086 * until we get real dma queues (with an entry for each urb/buffer),
2087 * we only have work to do in the former case.
2088 */
2089 spin_lock_irqsave(&musb->lock, flags);
2090 if (hep->hcpriv) {
2091 /* some concurrent activity submitted another urb to hep...
2092 * odd, rare, error prone, but legal.
2093 */
2094 kfree(qh);
2095 qh = NULL;
2096 ret = 0;
2097 } else
2098 ret = musb_schedule(musb, qh,
2099 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2100
2101 if (ret == 0) {
2102 urb->hcpriv = qh;
2103 /* FIXME set urb->start_frame for iso/intr, it's tested in
2104 * musb_start_urb(), but otherwise only konicawc cares ...
2105 */
2106 }
2107 spin_unlock_irqrestore(&musb->lock, flags);
2108
2109done:
2110 if (ret != 0) {
2111 spin_lock_irqsave(&musb->lock, flags);
2112 usb_hcd_unlink_urb_from_ep(hcd, urb);
2113 spin_unlock_irqrestore(&musb->lock, flags);
2114 kfree(qh);
2115 }
2116 return ret;
2117}
2118
Ilya Yanokeb819552012-11-06 13:48:21 +00002119/*
2120 * Abort a transfer that's at the head of a hardware queue.
2121 * Called with the controller locked and IRQs blocked;
2122 * the hardware queue advances to the next transfer, unless prevented.
2123 */
2124static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2125{
2126 struct musb_hw_ep *ep = qh->hw_ep;
2127 struct musb *musb = ep->musb;
2128 void __iomem *epio = ep->regs;
2129 unsigned hw_end = ep->epnum;
2130 void __iomem *regs = ep->musb->mregs;
2131 int is_in = usb_pipein(urb->pipe);
2132 int status = 0;
2133 u16 csr;
2134
2135 musb_ep_select(regs, hw_end);
2136
2137 if (is_dma_capable()) {
2138 struct dma_channel *dma;
2139
2140 dma = is_in ? ep->rx_channel : ep->tx_channel;
2141 if (dma) {
2142 status = ep->musb->dma_controller->channel_abort(dma);
2143 dev_dbg(musb->controller,
2144 "abort %cX%d DMA for urb %p --> %d\n",
2145 is_in ? 'R' : 'T', ep->epnum,
2146 urb, status);
2147 urb->actual_length += dma->actual_len;
2148 }
2149 }
2150
2151 /* turn off DMA requests, discard state, stop polling ... */
2152 if (ep->epnum && is_in) {
2153 /* giveback saves bulk toggle */
2154 csr = musb_h_flush_rxfifo(ep, 0);
2155
2156 /* REVISIT we still get an irq; should likely clear the
2157 * endpoint's irq status here to avoid bogus irqs.
2158 * clearing that status is platform-specific...
2159 */
2160 } else if (ep->epnum) {
2161 musb_h_tx_flush_fifo(ep);
2162 csr = musb_readw(epio, MUSB_TXCSR);
2163 csr &= ~(MUSB_TXCSR_AUTOSET
2164 | MUSB_TXCSR_DMAENAB
2165 | MUSB_TXCSR_H_RXSTALL
2166 | MUSB_TXCSR_H_NAKTIMEOUT
2167 | MUSB_TXCSR_H_ERROR
2168 | MUSB_TXCSR_TXPKTRDY);
2169 musb_writew(epio, MUSB_TXCSR, csr);
2170 /* REVISIT may need to clear FLUSHFIFO ... */
2171 musb_writew(epio, MUSB_TXCSR, csr);
2172 /* flush cpu writebuffer */
2173 csr = musb_readw(epio, MUSB_TXCSR);
2174 } else {
2175 musb_h_ep0_flush_fifo(ep);
2176 }
2177 if (status == 0)
2178 musb_advance_schedule(ep->musb, urb, ep, is_in);
2179 return status;
2180}
2181
Hans de Goedeb918a0c2015-01-11 20:34:52 +01002182#ifndef __UBOOT__
2183static int musb_urb_dequeue(
2184#else
2185int musb_urb_dequeue(
2186#endif
2187 struct usb_hcd *hcd,
2188 struct urb *urb,
2189 int status)
Ilya Yanokeb819552012-11-06 13:48:21 +00002190{
2191 struct musb *musb = hcd_to_musb(hcd);
2192 struct musb_qh *qh;
2193 unsigned long flags;
2194 int is_in = usb_pipein(urb->pipe);
2195 int ret;
2196
2197 dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
2198 usb_pipedevice(urb->pipe),
2199 usb_pipeendpoint(urb->pipe),
2200 is_in ? "in" : "out");
2201
2202 spin_lock_irqsave(&musb->lock, flags);
2203 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2204 if (ret)
2205 goto done;
2206
2207 qh = urb->hcpriv;
2208 if (!qh)
2209 goto done;
2210
2211 /*
2212 * Any URB not actively programmed into endpoint hardware can be
2213 * immediately given back; that's any URB not at the head of an
2214 * endpoint queue, unless someday we get real DMA queues. And even
2215 * if it's at the head, it might not be known to the hardware...
2216 *
2217 * Otherwise abort current transfer, pending DMA, etc.; urb->status
2218 * has already been updated. This is a synchronous abort; it'd be
2219 * OK to hold off until after some IRQ, though.
2220 *
2221 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2222 */
2223 if (!qh->is_ready
2224 || urb->urb_list.prev != &qh->hep->urb_list
2225 || musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2226 int ready = qh->is_ready;
2227
2228 qh->is_ready = 0;
2229 musb_giveback(musb, urb, 0);
2230 qh->is_ready = ready;
2231
2232 /* If nothing else (usually musb_giveback) is using it
2233 * and its URB list has emptied, recycle this qh.
2234 */
2235 if (ready && list_empty(&qh->hep->urb_list)) {
2236 qh->hep->hcpriv = NULL;
2237 list_del(&qh->ring);
2238 kfree(qh);
2239 }
2240 } else
2241 ret = musb_cleanup_urb(urb, qh);
2242done:
2243 spin_unlock_irqrestore(&musb->lock, flags);
2244 return ret;
2245}
2246
Hans de Goedeb918a0c2015-01-11 20:34:52 +01002247#ifndef __UBOOT__
Ilya Yanokeb819552012-11-06 13:48:21 +00002248/* disable an endpoint */
2249static void
2250musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2251{
2252 u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2253 unsigned long flags;
2254 struct musb *musb = hcd_to_musb(hcd);
2255 struct musb_qh *qh;
2256 struct urb *urb;
2257
2258 spin_lock_irqsave(&musb->lock, flags);
2259
2260 qh = hep->hcpriv;
2261 if (qh == NULL)
2262 goto exit;
2263
2264 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2265
2266 /* Kick the first URB off the hardware, if needed */
2267 qh->is_ready = 0;
2268 if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2269 urb = next_urb(qh);
2270
2271 /* make software (then hardware) stop ASAP */
2272 if (!urb->unlinked)
2273 urb->status = -ESHUTDOWN;
2274
2275 /* cleanup */
2276 musb_cleanup_urb(urb, qh);
2277
2278 /* Then nuke all the others ... and advance the
2279 * queue on hw_ep (e.g. bulk ring) when we're done.
2280 */
2281 while (!list_empty(&hep->urb_list)) {
2282 urb = next_urb(qh);
2283 urb->status = -ESHUTDOWN;
2284 musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2285 }
2286 } else {
2287 /* Just empty the queue; the hardware is busy with
2288 * other transfers, and since !qh->is_ready nothing
2289 * will activate any of these as it advances.
2290 */
2291 while (!list_empty(&hep->urb_list))
2292 musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2293
2294 hep->hcpriv = NULL;
2295 list_del(&qh->ring);
2296 kfree(qh);
2297 }
2298exit:
2299 spin_unlock_irqrestore(&musb->lock, flags);
2300}
2301
2302static int musb_h_get_frame_number(struct usb_hcd *hcd)
2303{
2304 struct musb *musb = hcd_to_musb(hcd);
2305
2306 return musb_readw(musb->mregs, MUSB_FRAME);
2307}
2308
2309static int musb_h_start(struct usb_hcd *hcd)
2310{
2311 struct musb *musb = hcd_to_musb(hcd);
2312
2313 /* NOTE: musb_start() is called when the hub driver turns
2314 * on port power, or when (OTG) peripheral starts.
2315 */
2316 hcd->state = HC_STATE_RUNNING;
2317 musb->port1_status = 0;
2318 return 0;
2319}
2320
2321static void musb_h_stop(struct usb_hcd *hcd)
2322{
2323 musb_stop(hcd_to_musb(hcd));
2324 hcd->state = HC_STATE_HALT;
2325}
2326
2327static int musb_bus_suspend(struct usb_hcd *hcd)
2328{
2329 struct musb *musb = hcd_to_musb(hcd);
2330 u8 devctl;
2331
2332 if (!is_host_active(musb))
2333 return 0;
2334
2335 switch (musb->xceiv->state) {
2336 case OTG_STATE_A_SUSPEND:
2337 return 0;
2338 case OTG_STATE_A_WAIT_VRISE:
2339 /* ID could be grounded even if there's no device
2340 * on the other end of the cable. NOTE that the
2341 * A_WAIT_VRISE timers are messy with MUSB...
2342 */
2343 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2344 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2345 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2346 break;
2347 default:
2348 break;
2349 }
2350
2351 if (musb->is_active) {
2352 WARNING("trying to suspend as %s while active\n",
2353 otg_state_string(musb->xceiv->state));
2354 return -EBUSY;
2355 } else
2356 return 0;
2357}
2358
2359static int musb_bus_resume(struct usb_hcd *hcd)
2360{
2361 /* resuming child port does the work */
2362 return 0;
2363}
2364
2365const struct hc_driver musb_hc_driver = {
2366 .description = "musb-hcd",
2367 .product_desc = "MUSB HDRC host driver",
2368 .hcd_priv_size = sizeof(struct musb),
2369 .flags = HCD_USB2 | HCD_MEMORY,
2370
2371 /* not using irq handler or reset hooks from usbcore, since
2372 * those must be shared with peripheral code for OTG configs
2373 */
2374
2375 .start = musb_h_start,
2376 .stop = musb_h_stop,
2377
2378 .get_frame_number = musb_h_get_frame_number,
2379
2380 .urb_enqueue = musb_urb_enqueue,
2381 .urb_dequeue = musb_urb_dequeue,
2382 .endpoint_disable = musb_h_disable,
2383
2384 .hub_status_data = musb_hub_status_data,
2385 .hub_control = musb_hub_control,
2386 .bus_suspend = musb_bus_suspend,
2387 .bus_resume = musb_bus_resume,
2388 /* .start_port_reset = NULL, */
2389 /* .hub_irq_enable = NULL, */
2390};
2391#endif