/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#else
#include <common.h>
#include <linux/usb/ch9.h>
#include "linux-compat.h"
#endif

#include "musb_core.h"


/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but USBCV failures were
 *       seen in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested. Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

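/*
 * A request's buffer counts as mapped only when DMA is possible at all
 * and map_dma_buffer() actually mapped (or synced) it; the PIO paths
 * use this to decide whether request->dma is meaningful.
 */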
#define is_buffer_mapped(req) (is_dma_capable() && \
	(req->map_state != UN_MAPPED))

#ifndef CONFIG_USB_MUSB_PIO_ONLY
/* Maps the buffer to dma */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

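	/*
	 * DMA_ADDR_INVALID means the gadget handed us a plain CPU buffer
	 * to map here; anything else was pre-mapped by the caller and
	 * only needs a cache sync for the device.
	 */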
	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from dma and map it back to the cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	if (!is_buffer_mapped(request))
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
#else
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
}

static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
}
#endif

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued to
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

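	/*
	 * Mark the endpoint busy so a complete() callback that queues
	 * another request won't recursively restart I/O on this endpoint.
	 */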
	ep->busy = 1;
	spin_unlock(&musb->lock);
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, completing each with the given
 * status. Synchronous. The caller has locked the controller, blocked
 * IRQs, and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
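			/*
			 * Flush twice: a double-buffered FIFO can hold two
			 * packets, and FLUSHFIFO only discards one of them.
			 */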
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize,
	mode 1 is used for larger transfers,

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |	   (data is transferred to the FIFO, then sent out
		  |	    when IN token(s) are received from the host)
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
		  |		      -> stop DMA, ~DMAENAB,
		  |		      -> set TxPktRdy for last short pkt or zlp
		  |		      -> Complete Request
		  |		      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_USB_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
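			/*
			 * Mode 0 moves at most one packet per DMA request,
			 * while mode 1 can stream a whole multi-packet
			 * buffer (see the TX notes above).
			 */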
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual,
					request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					if (!musb_ep->hb_mult)
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start. Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and re-acquired after some time. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\	-> RxReady
	  |	    -> if request queued, call rxstate
	  |	       /\	-> setup DMA
	  |	       |	    -> DMA interrupt on completion
	  |	       |	       -> RxReady
	  |	       |		  -> stop DMA
	  |	       |		  -> ack the read
	  |	       |		  -> if data received = max expected
	  |	       |		     by the request, or host
	  |	       |		     sent a short packet,
	  |	       |		     complete the request,
	  |	       |		     and start the next one.
	  |	       |_____________________________________|
	  |				 else just wait for the host
	  |				    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	len = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE: CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && len == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work. But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, end of transfer is signified either by a
	 * short packet, or by filling the last byte of the buffer. (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get a DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (request->actual < request->length) {
					int transfer_size = 0;
					if (use_mode_1) {
						transfer_size = min(request->length - request->actual,
								channel->max_len);
						musb_ep->dma->desired_mode = 1;
					} else {
						transfer_size = min(request->length - request->actual,
								(unsigned)len);
						musb_ep->dma->desired_mode = 0;
					}

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (len < musb_ep->packet_sz)
					transfer_size = len;
				else if (request->short_not_ok)
					transfer_size = min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)len);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end or a short packet was detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for the next OUT packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and re-acquired after some time. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem		*mbase;
	u8			epnum;
	u16			csr;
	unsigned		tmp;
	int			status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

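	/*
	 * wMaxPacketSize: bits 10:0 hold the packet size; for periodic
	 * endpoints, bits 12:11 encode extra (high bandwidth)
	 * transactions per microframe.
	 */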
	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
			& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint, flushing all queued requests.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;
#ifndef __UBOOT__
	musb_ep->end_point.desc = NULL;
#endif

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

1301 * Context: controller locked, IRQs blocked.
1302 */
1303void musb_ep_restart(struct musb *musb, struct musb_request *req)
1304{
1305 dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1306 req->tx ? "TX/IN" : "RX/OUT",
1307 &req->request, req->request.length, req->epnum);
1308
1309 musb_ep_select(musb->mregs, req->epnum);
1310 if (req->tx)
1311 txstate(musb, req);
1312 else
1313 rxstate(musb, req);
1314}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

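	/*
	 * Map (or sync) the buffer up front; if DMA channel programming
	 * fails later, the PIO paths unmap it again before copying.
	 */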
	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
			    ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		dev_dbg(musb->controller, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

#ifndef __UBOOT__
/*
 * Sets the halt feature, with subsequent clear-halt requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}
#endif

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
#ifndef __UBOOT__
	.set_wedge	= musb_gadget_set_wedge,
#endif
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
#ifndef __UBOOT__
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
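	/* USB 2.0 allows a device to drive resume signaling for 1-15 ms */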
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
#else
	return 0;
#endif
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

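/*
 * SOFTCONN gates the D+ pullup: setting it makes the host see a newly
 * attached device, clearing it looks like a disconnect.
 */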
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
#ifndef __UBOOT__
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return usb_phy_set_power(musb->xceiv, mA);
#else
	return 0;
#endif
}

static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	pm_runtime_get_sync(musb->controller);

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_put(musb->controller);

	return 0;
}

#ifndef __UBOOT__
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
#endif

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
#ifndef __UBOOT__
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
#endif
};

/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port. It assumes
 * all peripheral ports are external...
 */

#ifndef __UBOOT__
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
#endif


static void __devinit
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
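		/* ep0: the control FIFO is 64 bytes, the high-speed maximum */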
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __devinit musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int __devinit musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
#ifndef __UBOOT__
	musb->g.max_speed = USB_SPEED_HIGH;
#endif
	musb->g.speed = USB_SPEED_UNKNOWN;

#ifndef __UBOOT__
	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
#endif
	musb->g.name = musb_driver_name;

#ifndef __UBOOT__
	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;
#endif

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

#ifndef __UBOOT__
	status = device_register(&musb->g.dev);
	if (status != 0) {
		put_device(&musb->g.dev);
		return status;
	}
	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;
#endif

	return 0;
#ifndef __UBOOT__
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
#endif
}

void musb_gadget_cleanup(struct musb *musb)
{
#ifndef __UBOOT__
	usb_del_gadget_udc(&musb->g);
	if (musb->g.dev.parent)
		device_unregister(&musb->g.dev);
#endif
}
1944
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (e.g. the driver doesn't support
 *	high speed)
 * -EBUSY another gadget driver is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 on error, 0 if everything is fine
 */
#ifndef __UBOOT__
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
#else
int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
#endif
{
	struct musb *musb = gadget_to_musb(g);
#ifndef __UBOOT__
	struct usb_otg *otg = musb->xceiv->otg;
#endif
	unsigned long flags;
	int retval = -EINVAL;

#ifndef __UBOOT__
	if (driver->max_speed < USB_SPEED_HIGH)
		goto err0;
#endif

	pm_runtime_get_sync(musb->controller);

#ifndef __UBOOT__
	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
#endif

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

#ifndef __UBOOT__
	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;

	/*
	 * FIXME this ignores the softconnect flag.  Drivers are
	 * allowed to hold the peripheral inactive until, for example,
	 * userspace hooks up printer hardware or DSP codecs, so that
	 * hosts only see fully functional devices.
	 */

	if (!is_otg_enabled(musb))
#endif
		musb_start(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

#ifndef __UBOOT__
	if (is_otg_enabled(musb)) {
		struct usb_hcd *hcd = musb_to_hcd(musb);

		dev_dbg(musb->controller, "OTG startup...\n");

		/* REVISIT: funcall to other code, which also
		 * handles power budgeting ... this way also
		 * ensures HdrcStart is indirectly called.
		 */
		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
		if (retval < 0) {
			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
			goto err2;
		}

		if ((musb->xceiv->last_event == USB_EVENT_ID)
					&& otg->set_vbus)
			otg_set_vbus(otg, 1);

		hcd->self.uses_pio_for_control = 1;
	}
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);
#endif

	return 0;

#ifndef __UBOOT__
err2:
	if (!is_otg_enabled(musb))
		musb_stop(musb);
err0:
	return retval;
#endif
}
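/*
 * Usage sketch (illustrative; "my_driver" and its callbacks are
 * placeholders, not names from this driver).  Under U-Boot a gadget
 * driver can be bound by handing its usb_gadget_driver to the
 * controller directly:
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.bind		= my_bind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *	};
 *
 *	ret = musb_gadget_start(&musb->g, &my_driver);
 *
 * In the Linux build the same entry point is reached indirectly,
 * through the musb_gadget_operations table registered with
 * usb_add_gadget_udc() in musb_gadget_setup() above.
 */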
2041
#ifndef __UBOOT__
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int i;
	struct musb_hw_ep *hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}
	}
}
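/*
 * Note: nuke() (defined earlier in this file) dequeues every pending
 * request on the endpoint and completes it with the given status;
 * -ESHUTDOWN tells the gadget driver the endpoint is going away for
 * good, not that a single transfer failed.
 */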
2080
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb *musb = gadget_to_musb(g);
	unsigned long flags;

	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->state = OTG_STATE_UNDEFINED;
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv->otg, NULL);

	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	if (!is_otg_enabled(musb))
		musb_stop(musb);

	pm_runtime_put(musb->controller);

	return 0;
}
#endif
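/*
 * Note: stop_activity() and musb_gadget_stop() are compiled out under
 * __UBOOT__; in that environment per-gadget teardown is limited to
 * musb_gadget_cleanup() above.
 */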
2133
/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
#ifndef __UBOOT__
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
					otg_state_string(musb->xceiv->state));
	}
#endif
}
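/*
 * The lock is dropped around the gadget driver's resume/suspend/
 * disconnect callbacks (here and below) because those callbacks may
 * call back into gadget operations that take musb->lock themselves.
 */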
2160
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
#ifndef __UBOOT__
	u8 devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
					otg_state_string(musb->xceiv->state));
	}
#endif
}
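/*
 * The "3+ msec" above comes from the USB 2.0 suspend rules: a device
 * must treat 3 ms of bus inactivity (no SOF packets) as a suspend
 * request from the host.
 */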
2192
/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}
2198
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem *mregs = musb->mregs;
	u8 devctl = musb_readb(mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

#ifndef __UBOOT__
	switch (musb->xceiv->state) {
	default:
		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
					otg_state_string(musb->xceiv->state));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}
#endif

	musb->is_active = 0;
}
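/*
 * Note: "HR" above is the DEVCTL host-request (HNP) bit; masking
 * DEVCTL down to MUSB_DEVCTL_SESSION clears it, and vbus_draw(0)
 * drops our current budget until the host configures us again.
 */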
2245
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem *mbase = musb->mregs;
	u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8 power;

#ifndef __UBOOT__
	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);
#endif

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

#ifndef __UBOOT__
	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
#endif
}
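/*
 * Example (illustrative): after a reset, a gadget driver typically
 * checks the speed recorded above before picking a bulk maxpacket:
 *
 *	if (gadget->speed == USB_SPEED_HIGH)
 *		maxpacket = 512;	// high-speed bulk
 *	else
 *		maxpacket = 64;		// full-speed bulk
 */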