blob: 7c932c6064d6ca8a2ccccceb6758c9fd925a51bf [file] [log] [blame]
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +05301/**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
Kishon Vijay Abraham I30c31d52015-02-23 18:39:52 +05304 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +05305 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
Kishon Vijay Abraham I30c31d52015-02-23 18:39:52 +05309 * Taken from Linux Kernel v3.19-rc1 (drivers/usb/dwc3/gadget.c) and ported
10 * to uboot.
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +053011 *
Kishon Vijay Abraham I30c31d52015-02-23 18:39:52 +053012 * commit 8e74475b0e : usb: dwc3: gadget: use udc-core's reset notifier
13 *
14 * SPDX-License-Identifier: GPL-2.0
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +053015 */
16
17#include <linux/kernel.h>
18#include <linux/delay.h>
19#include <linux/slab.h>
20#include <linux/spinlock.h>
21#include <linux/platform_device.h>
22#include <linux/pm_runtime.h>
23#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/list.h>
26#include <linux/dma-mapping.h>
27
28#include <linux/usb/ch9.h>
29#include <linux/usb/gadget.h>
30
31#include "debug.h"
32#include "core.h"
33#include "gadget.h"
34#include "io.h"
35
36/**
37 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
38 * @dwc: pointer to our context structure
39 * @mode: the mode to set (J, K SE0 NAK, Force Enable)
40 *
41 * Caller should take care of locking. This function will
42 * return 0 on success or -EINVAL if wrong Test Selector
43 * is passed
44 */
45int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
46{
47 u32 reg;
48
49 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
50 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
51
52 switch (mode) {
53 case TEST_J:
54 case TEST_K:
55 case TEST_SE0_NAK:
56 case TEST_PACKET:
57 case TEST_FORCE_EN:
58 reg |= mode << 1;
59 break;
60 default:
61 return -EINVAL;
62 }
63
64 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
65
66 return 0;
67}
68
69/**
70 * dwc3_gadget_get_link_state - Gets current state of USB Link
71 * @dwc: pointer to our context structure
72 *
73 * Caller should take care of locking. This function will
74 * return the link state on success (>= 0) or -ETIMEDOUT.
75 */
76int dwc3_gadget_get_link_state(struct dwc3 *dwc)
77{
78 u32 reg;
79
80 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
81
82 return DWC3_DSTS_USBLNKST(reg);
83}
84
85/**
86 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
87 * @dwc: pointer to our context structure
88 * @state: the state to put link into
89 *
90 * Caller should take care of locking. This function will
91 * return 0 on success or -ETIMEDOUT.
92 */
93int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
94{
95 int retries = 10000;
96 u32 reg;
97
98 /*
99 * Wait until device controller is ready. Only applies to 1.94a and
100 * later RTL.
101 */
102 if (dwc->revision >= DWC3_REVISION_194A) {
103 while (--retries) {
104 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
105 if (reg & DWC3_DSTS_DCNRD)
106 udelay(5);
107 else
108 break;
109 }
110
111 if (retries <= 0)
112 return -ETIMEDOUT;
113 }
114
115 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
116 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
117
118 /* set requested state */
119 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
120 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
121
122 /*
123 * The following code is racy when called from dwc3_gadget_wakeup,
124 * and is not needed, at least on newer versions
125 */
126 if (dwc->revision >= DWC3_REVISION_194A)
127 return 0;
128
129 /* wait for a change in DSTS */
130 retries = 10000;
131 while (--retries) {
132 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
133
134 if (DWC3_DSTS_USBLNKST(reg) == state)
135 return 0;
136
137 udelay(5);
138 }
139
140 dev_vdbg(dwc->dev, "link state change request timed out\n");
141
142 return -ETIMEDOUT;
143}
144
145/**
146 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
147 * @dwc: pointer to our context structure
148 *
149 * This function will a best effort FIFO allocation in order
150 * to improve FIFO usage and throughput, while still allowing
151 * us to enable as many endpoints as possible.
152 *
153 * Keep in mind that this operation will be highly dependent
154 * on the configured size for RAM1 - which contains TxFifo -,
155 * the amount of endpoints enabled on coreConsultant tool, and
156 * the width of the Master Bus.
157 *
158 * In the ideal world, we would always be able to satisfy the
159 * following equation:
160 *
161 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
162 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
163 *
164 * Unfortunately, due to many variables that's not always the case.
165 */
166int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
167{
168 int last_fifo_depth = 0;
169 int ram1_depth;
170 int fifo_size;
171 int mdwidth;
172 int num;
173
174 if (!dwc->needs_fifo_resize)
175 return 0;
176
177 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
178 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
179
180 /* MDWIDTH is represented in bits, we need it in bytes */
181 mdwidth >>= 3;
182
183 /*
184 * FIXME For now we will only allocate 1 wMaxPacketSize space
185 * for each enabled endpoint, later patches will come to
186 * improve this algorithm so that we better use the internal
187 * FIFO space
188 */
189 for (num = 0; num < dwc->num_in_eps; num++) {
190 /* bit0 indicates direction; 1 means IN ep */
191 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
192 int mult = 1;
193 int tmp;
194
195 if (!(dep->flags & DWC3_EP_ENABLED))
196 continue;
197
198 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
199 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
200 mult = 3;
201
202 /*
203 * REVISIT: the following assumes we will always have enough
204 * space available on the FIFO RAM for all possible use cases.
205 * Make sure that's true somehow and change FIFO allocation
206 * accordingly.
207 *
208 * If we have Bulk or Isochronous endpoints, we want
209 * them to be able to be very, very fast. So we're giving
210 * those endpoints a fifo_size which is enough for 3 full
211 * packets
212 */
213 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
214 tmp += mdwidth;
215
216 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
217
218 fifo_size |= (last_fifo_depth << 16);
219
220 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
221 dep->name, last_fifo_depth, fifo_size & 0xffff);
222
223 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
224
225 last_fifo_depth += (fifo_size & 0xffff);
226 }
227
228 return 0;
229}
230
231void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
232 int status)
233{
234 struct dwc3 *dwc = dep->dwc;
235 int i;
236
237 if (req->queued) {
238 i = 0;
239 do {
240 dep->busy_slot++;
241 /*
242 * Skip LINK TRB. We can't use req->trb and check for
243 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
244 * just completed (not the LINK TRB).
245 */
246 if (((dep->busy_slot & DWC3_TRB_MASK) ==
247 DWC3_TRB_NUM- 1) &&
248 usb_endpoint_xfer_isoc(dep->endpoint.desc))
249 dep->busy_slot++;
250 } while(++i < req->request.num_mapped_sgs);
251 req->queued = false;
252 }
253 list_del(&req->list);
254 req->trb = NULL;
255
256 if (req->request.status == -EINPROGRESS)
257 req->request.status = status;
258
259 if (dwc->ep0_bounced && dep->number == 0)
260 dwc->ep0_bounced = false;
261 else
262 usb_gadget_unmap_request(&dwc->gadget, &req->request,
263 req->direction);
264
265 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
266 req, dep->name, req->request.actual,
267 req->request.length, status);
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +0530268
269 spin_unlock(&dwc->lock);
270 usb_gadget_giveback_request(&dep->endpoint, &req->request);
271 spin_lock(&dwc->lock);
272}
273
274int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
275{
276 u32 timeout = 500;
277 u32 reg;
278
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +0530279 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
280 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
281
282 do {
283 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
284 if (!(reg & DWC3_DGCMD_CMDACT)) {
285 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
286 DWC3_DGCMD_STATUS(reg));
287 return 0;
288 }
289
290 /*
291 * We can't sleep here, because it's also called from
292 * interrupt context.
293 */
294 timeout--;
295 if (!timeout)
296 return -ETIMEDOUT;
297 udelay(1);
298 } while (1);
299}
300
301int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
302 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
303{
304 struct dwc3_ep *dep = dwc->eps[ep];
305 u32 timeout = 500;
306 u32 reg;
307
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +0530308 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
309 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
310 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
311
312 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
313 do {
314 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
315 if (!(reg & DWC3_DEPCMD_CMDACT)) {
316 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
317 DWC3_DEPCMD_STATUS(reg));
318 return 0;
319 }
320
321 /*
322 * We can't sleep here, because it is also called from
323 * interrupt context.
324 */
325 timeout--;
326 if (!timeout)
327 return -ETIMEDOUT;
328
329 udelay(1);
330 } while (1);
331}
332
333static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
334 struct dwc3_trb *trb)
335{
336 u32 offset = (char *) trb - (char *) dep->trb_pool;
337
338 return dep->trb_pool_dma + offset;
339}
340
341static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
342{
343 struct dwc3 *dwc = dep->dwc;
344
345 if (dep->trb_pool)
346 return 0;
347
348 if (dep->number == 0 || dep->number == 1)
349 return 0;
350
351 dep->trb_pool = dma_alloc_coherent(dwc->dev,
352 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
353 &dep->trb_pool_dma, GFP_KERNEL);
354 if (!dep->trb_pool) {
355 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
356 dep->name);
357 return -ENOMEM;
358 }
359
360 return 0;
361}
362
363static void dwc3_free_trb_pool(struct dwc3_ep *dep)
364{
365 struct dwc3 *dwc = dep->dwc;
366
367 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
368 dep->trb_pool, dep->trb_pool_dma);
369
370 dep->trb_pool = NULL;
371 dep->trb_pool_dma = 0;
372}
373
374static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
375{
376 struct dwc3_gadget_ep_cmd_params params;
377 u32 cmd;
378
379 memset(&params, 0x00, sizeof(params));
380
381 if (dep->number != 1) {
382 cmd = DWC3_DEPCMD_DEPSTARTCFG;
383 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
384 if (dep->number > 1) {
385 if (dwc->start_config_issued)
386 return 0;
387 dwc->start_config_issued = true;
388 cmd |= DWC3_DEPCMD_PARAM(2);
389 }
390
391 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
392 }
393
394 return 0;
395}
396
397static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
398 const struct usb_endpoint_descriptor *desc,
399 const struct usb_ss_ep_comp_descriptor *comp_desc,
400 bool ignore, bool restore)
401{
402 struct dwc3_gadget_ep_cmd_params params;
403
404 memset(&params, 0x00, sizeof(params));
405
406 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
407 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
408
409 /* Burst size is only needed in SuperSpeed mode */
410 if (dwc->gadget.speed == USB_SPEED_SUPER) {
411 u32 burst = dep->endpoint.maxburst - 1;
412
413 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
414 }
415
416 if (ignore)
417 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
418
419 if (restore) {
420 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
421 params.param2 |= dep->saved_state;
422 }
423
424 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
425 | DWC3_DEPCFG_XFER_NOT_READY_EN;
426
427 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
428 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
429 | DWC3_DEPCFG_STREAM_EVENT_EN;
430 dep->stream_capable = true;
431 }
432
433 if (!usb_endpoint_xfer_control(desc))
434 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
435
436 /*
437 * We are doing 1:1 mapping for endpoints, meaning
438 * Physical Endpoints 2 maps to Logical Endpoint 2 and
439 * so on. We consider the direction bit as part of the physical
440 * endpoint number. So USB endpoint 0x81 is 0x03.
441 */
442 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
443
444 /*
445 * We must use the lower 16 TX FIFOs even though
446 * HW might have more
447 */
448 if (dep->direction)
449 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
450
451 if (desc->bInterval) {
452 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
453 dep->interval = 1 << (desc->bInterval - 1);
454 }
455
456 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
457 DWC3_DEPCMD_SETEPCONFIG, &params);
458}
459
460static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
461{
462 struct dwc3_gadget_ep_cmd_params params;
463
464 memset(&params, 0x00, sizeof(params));
465
466 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
467
468 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
469 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
470}
471
472/**
473 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
474 * @dep: endpoint to be initialized
475 * @desc: USB Endpoint Descriptor
476 *
477 * Caller should take care of locking
478 */
479static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
480 const struct usb_endpoint_descriptor *desc,
481 const struct usb_ss_ep_comp_descriptor *comp_desc,
482 bool ignore, bool restore)
483{
484 struct dwc3 *dwc = dep->dwc;
485 u32 reg;
486 int ret;
487
488 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
489
490 if (!(dep->flags & DWC3_EP_ENABLED)) {
491 ret = dwc3_gadget_start_config(dwc, dep);
492 if (ret)
493 return ret;
494 }
495
496 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
497 restore);
498 if (ret)
499 return ret;
500
501 if (!(dep->flags & DWC3_EP_ENABLED)) {
502 struct dwc3_trb *trb_st_hw;
503 struct dwc3_trb *trb_link;
504
505 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
506 if (ret)
507 return ret;
508
509 dep->endpoint.desc = desc;
510 dep->comp_desc = comp_desc;
511 dep->type = usb_endpoint_type(desc);
512 dep->flags |= DWC3_EP_ENABLED;
513
514 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
515 reg |= DWC3_DALEPENA_EP(dep->number);
516 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
517
518 if (!usb_endpoint_xfer_isoc(desc))
519 return 0;
520
521 /* Link TRB for ISOC. The HWO bit is never reset */
522 trb_st_hw = &dep->trb_pool[0];
523
524 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
525 memset(trb_link, 0, sizeof(*trb_link));
526
527 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
528 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
529 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
530 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
531 }
532
533 return 0;
534}
535
536static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
537static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
538{
539 struct dwc3_request *req;
540
541 if (!list_empty(&dep->req_queued)) {
542 dwc3_stop_active_transfer(dwc, dep->number, true);
543
544 /* - giveback all requests to gadget driver */
545 while (!list_empty(&dep->req_queued)) {
546 req = next_request(&dep->req_queued);
547
548 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
549 }
550 }
551
552 while (!list_empty(&dep->request_list)) {
553 req = next_request(&dep->request_list);
554
555 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
556 }
557}
558
559/**
560 * __dwc3_gadget_ep_disable - Disables a HW endpoint
561 * @dep: the endpoint to disable
562 *
563 * This function also removes requests which are currently processed ny the
564 * hardware and those which are not yet scheduled.
565 * Caller should take care of locking.
566 */
567static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
568{
569 struct dwc3 *dwc = dep->dwc;
570 u32 reg;
571
572 dwc3_remove_requests(dwc, dep);
573
574 /* make sure HW endpoint isn't stalled */
575 if (dep->flags & DWC3_EP_STALL)
576 __dwc3_gadget_ep_set_halt(dep, 0, false);
577
578 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
579 reg &= ~DWC3_DALEPENA_EP(dep->number);
580 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
581
582 dep->stream_capable = false;
583 dep->endpoint.desc = NULL;
584 dep->comp_desc = NULL;
585 dep->type = 0;
586 dep->flags = 0;
587
588 return 0;
589}
590
591/* -------------------------------------------------------------------------- */
592
593static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
594 const struct usb_endpoint_descriptor *desc)
595{
596 return -EINVAL;
597}
598
599static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
600{
601 return -EINVAL;
602}
603
604/* -------------------------------------------------------------------------- */
605
606static int dwc3_gadget_ep_enable(struct usb_ep *ep,
607 const struct usb_endpoint_descriptor *desc)
608{
609 struct dwc3_ep *dep;
610 struct dwc3 *dwc;
611 unsigned long flags;
612 int ret;
613
614 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
615 pr_debug("dwc3: invalid parameters\n");
616 return -EINVAL;
617 }
618
619 if (!desc->wMaxPacketSize) {
620 pr_debug("dwc3: missing wMaxPacketSize\n");
621 return -EINVAL;
622 }
623
624 dep = to_dwc3_ep(ep);
625 dwc = dep->dwc;
626
627 if (dep->flags & DWC3_EP_ENABLED) {
628 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
629 dep->name);
630 return 0;
631 }
632
633 switch (usb_endpoint_type(desc)) {
634 case USB_ENDPOINT_XFER_CONTROL:
635 strlcat(dep->name, "-control", sizeof(dep->name));
636 break;
637 case USB_ENDPOINT_XFER_ISOC:
638 strlcat(dep->name, "-isoc", sizeof(dep->name));
639 break;
640 case USB_ENDPOINT_XFER_BULK:
641 strlcat(dep->name, "-bulk", sizeof(dep->name));
642 break;
643 case USB_ENDPOINT_XFER_INT:
644 strlcat(dep->name, "-int", sizeof(dep->name));
645 break;
646 default:
647 dev_err(dwc->dev, "invalid endpoint transfer type\n");
648 }
649
650 spin_lock_irqsave(&dwc->lock, flags);
651 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
652 spin_unlock_irqrestore(&dwc->lock, flags);
653
654 return ret;
655}
656
657static int dwc3_gadget_ep_disable(struct usb_ep *ep)
658{
659 struct dwc3_ep *dep;
660 struct dwc3 *dwc;
661 unsigned long flags;
662 int ret;
663
664 if (!ep) {
665 pr_debug("dwc3: invalid parameters\n");
666 return -EINVAL;
667 }
668
669 dep = to_dwc3_ep(ep);
670 dwc = dep->dwc;
671
672 if (!(dep->flags & DWC3_EP_ENABLED)) {
673 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
674 dep->name);
675 return 0;
676 }
677
678 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
679 dep->number >> 1,
680 (dep->number & 1) ? "in" : "out");
681
682 spin_lock_irqsave(&dwc->lock, flags);
683 ret = __dwc3_gadget_ep_disable(dep);
684 spin_unlock_irqrestore(&dwc->lock, flags);
685
686 return ret;
687}
688
689static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
690 gfp_t gfp_flags)
691{
692 struct dwc3_request *req;
693 struct dwc3_ep *dep = to_dwc3_ep(ep);
694
695 req = kzalloc(sizeof(*req), gfp_flags);
696 if (!req)
697 return NULL;
698
699 req->epnum = dep->number;
700 req->dep = dep;
701
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +0530702 return &req->request;
703}
704
705static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
706 struct usb_request *request)
707{
708 struct dwc3_request *req = to_dwc3_request(request);
709
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +0530710 kfree(req);
711}
712
713/**
714 * dwc3_prepare_one_trb - setup one TRB from one request
715 * @dep: endpoint for which this request is prepared
716 * @req: dwc3_request pointer
717 */
718static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
719 struct dwc3_request *req, dma_addr_t dma,
720 unsigned length, unsigned last, unsigned chain, unsigned node)
721{
722 struct dwc3 *dwc = dep->dwc;
723 struct dwc3_trb *trb;
724
725 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
726 dep->name, req, (unsigned long long) dma,
727 length, last ? " last" : "",
728 chain ? " chain" : "");
729
730
731 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
732
733 if (!req->trb) {
734 dwc3_gadget_move_request_queued(req);
735 req->trb = trb;
736 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
737 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
738 }
739
740 dep->free_slot++;
741 /* Skip the LINK-TRB on ISOC */
742 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
743 usb_endpoint_xfer_isoc(dep->endpoint.desc))
744 dep->free_slot++;
745
746 trb->size = DWC3_TRB_SIZE_LENGTH(length);
747 trb->bpl = lower_32_bits(dma);
748 trb->bph = upper_32_bits(dma);
749
750 switch (usb_endpoint_type(dep->endpoint.desc)) {
751 case USB_ENDPOINT_XFER_CONTROL:
752 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
753 break;
754
755 case USB_ENDPOINT_XFER_ISOC:
756 if (!node)
757 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
758 else
759 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
760 break;
761
762 case USB_ENDPOINT_XFER_BULK:
763 case USB_ENDPOINT_XFER_INT:
764 trb->ctrl = DWC3_TRBCTL_NORMAL;
765 break;
766 default:
767 /*
768 * This is only possible with faulty memory because we
769 * checked it already :)
770 */
771 BUG();
772 }
773
774 if (!req->request.no_interrupt && !chain)
775 trb->ctrl |= DWC3_TRB_CTRL_IOC;
776
777 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
778 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
779 trb->ctrl |= DWC3_TRB_CTRL_CSP;
780 } else if (last) {
781 trb->ctrl |= DWC3_TRB_CTRL_LST;
782 }
783
784 if (chain)
785 trb->ctrl |= DWC3_TRB_CTRL_CHN;
786
787 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
788 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
789
790 trb->ctrl |= DWC3_TRB_CTRL_HWO;
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +0530791}
792
793/*
794 * dwc3_prepare_trbs - setup TRBs from requests
795 * @dep: endpoint for which requests are being prepared
796 * @starting: true if the endpoint is idle and no requests are queued.
797 *
798 * The function goes through the requests list and sets up TRBs for the
799 * transfers. The function returns once there are no more TRBs available or
800 * it runs out of requests.
801 */
802static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
803{
804 struct dwc3_request *req, *n;
805 u32 trbs_left;
806 u32 max;
807 unsigned int last_one = 0;
808
809 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
810
811 /* the first request must not be queued */
812 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
813
814 /* Can't wrap around on a non-isoc EP since there's no link TRB */
815 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
816 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
817 if (trbs_left > max)
818 trbs_left = max;
819 }
820
821 /*
822 * If busy & slot are equal than it is either full or empty. If we are
823 * starting to process requests then we are empty. Otherwise we are
824 * full and don't do anything
825 */
826 if (!trbs_left) {
827 if (!starting)
828 return;
829 trbs_left = DWC3_TRB_NUM;
830 /*
831 * In case we start from scratch, we queue the ISOC requests
832 * starting from slot 1. This is done because we use ring
833 * buffer and have no LST bit to stop us. Instead, we place
834 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
835 * after the first request so we start at slot 1 and have
836 * 7 requests proceed before we hit the first IOC.
837 * Other transfer types don't use the ring buffer and are
838 * processed from the first TRB until the last one. Since we
839 * don't wrap around we have to start at the beginning.
840 */
841 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
842 dep->busy_slot = 1;
843 dep->free_slot = 1;
844 } else {
845 dep->busy_slot = 0;
846 dep->free_slot = 0;
847 }
848 }
849
850 /* The last TRB is a link TRB, not used for xfer */
851 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
852 return;
853
854 list_for_each_entry_safe(req, n, &dep->request_list, list) {
855 unsigned length;
856 dma_addr_t dma;
857 last_one = false;
858
859 if (req->request.num_mapped_sgs > 0) {
860 struct usb_request *request = &req->request;
861 struct scatterlist *sg = request->sg;
862 struct scatterlist *s;
863 int i;
864
865 for_each_sg(sg, s, request->num_mapped_sgs, i) {
866 unsigned chain = true;
867
868 length = sg_dma_len(s);
869 dma = sg_dma_address(s);
870
871 if (i == (request->num_mapped_sgs - 1) ||
872 sg_is_last(s)) {
873 if (list_is_last(&req->list,
874 &dep->request_list))
875 last_one = true;
876 chain = false;
877 }
878
879 trbs_left--;
880 if (!trbs_left)
881 last_one = true;
882
883 if (last_one)
884 chain = false;
885
886 dwc3_prepare_one_trb(dep, req, dma, length,
887 last_one, chain, i);
888
889 if (last_one)
890 break;
891 }
892 } else {
893 dma = req->request.dma;
894 length = req->request.length;
895 trbs_left--;
896
897 if (!trbs_left)
898 last_one = 1;
899
900 /* Is this the last request? */
901 if (list_is_last(&req->list, &dep->request_list))
902 last_one = 1;
903
904 dwc3_prepare_one_trb(dep, req, dma, length,
905 last_one, false, 0);
906
907 if (last_one)
908 break;
909 }
910 }
911}
912
913static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
914 int start_new)
915{
916 struct dwc3_gadget_ep_cmd_params params;
917 struct dwc3_request *req;
918 struct dwc3 *dwc = dep->dwc;
919 int ret;
920 u32 cmd;
921
922 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
923 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
924 return -EBUSY;
925 }
926 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
927
928 /*
929 * If we are getting here after a short-out-packet we don't enqueue any
930 * new requests as we try to set the IOC bit only on the last request.
931 */
932 if (start_new) {
933 if (list_empty(&dep->req_queued))
934 dwc3_prepare_trbs(dep, start_new);
935
936 /* req points to the first request which will be sent */
937 req = next_request(&dep->req_queued);
938 } else {
939 dwc3_prepare_trbs(dep, start_new);
940
941 /*
942 * req points to the first request where HWO changed from 0 to 1
943 */
944 req = next_request(&dep->req_queued);
945 }
946 if (!req) {
947 dep->flags |= DWC3_EP_PENDING_REQUEST;
948 return 0;
949 }
950
951 memset(&params, 0, sizeof(params));
952
953 if (start_new) {
954 params.param0 = upper_32_bits(req->trb_dma);
955 params.param1 = lower_32_bits(req->trb_dma);
956 cmd = DWC3_DEPCMD_STARTTRANSFER;
957 } else {
958 cmd = DWC3_DEPCMD_UPDATETRANSFER;
959 }
960
961 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
962 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
963 if (ret < 0) {
964 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
965
966 /*
967 * FIXME we need to iterate over the list of requests
968 * here and stop, unmap, free and del each of the linked
969 * requests instead of what we do now.
970 */
971 usb_gadget_unmap_request(&dwc->gadget, &req->request,
972 req->direction);
973 list_del(&req->list);
974 return ret;
975 }
976
977 dep->flags |= DWC3_EP_BUSY;
978
979 if (start_new) {
980 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
981 dep->number);
982 WARN_ON_ONCE(!dep->resource_index);
983 }
984
985 return 0;
986}
987
988static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
989 struct dwc3_ep *dep, u32 cur_uf)
990{
991 u32 uf;
992
993 if (list_empty(&dep->request_list)) {
994 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
995 dep->name);
996 dep->flags |= DWC3_EP_PENDING_REQUEST;
997 return;
998 }
999
1000 /* 4 micro frames in the future */
1001 uf = cur_uf + dep->interval * 4;
1002
1003 __dwc3_gadget_kick_transfer(dep, uf, 1);
1004}
1005
1006static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1007 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1008{
1009 u32 cur_uf, mask;
1010
1011 mask = ~(dep->interval - 1);
1012 cur_uf = event->parameters & mask;
1013
1014 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1015}
1016
1017static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1018{
1019 struct dwc3 *dwc = dep->dwc;
1020 int ret;
1021
1022 req->request.actual = 0;
1023 req->request.status = -EINPROGRESS;
1024 req->direction = dep->direction;
1025 req->epnum = dep->number;
1026
1027 /*
1028 * We only add to our list of requests now and
1029 * start consuming the list once we get XferNotReady
1030 * IRQ.
1031 *
1032 * That way, we avoid doing anything that we don't need
1033 * to do now and defer it until the point we receive a
1034 * particular token from the Host side.
1035 *
1036 * This will also avoid Host cancelling URBs due to too
1037 * many NAKs.
1038 */
1039 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1040 dep->direction);
1041 if (ret)
1042 return ret;
1043
1044 list_add_tail(&req->list, &dep->request_list);
1045
1046 /*
1047 * There are a few special cases:
1048 *
1049 * 1. XferNotReady with empty list of requests. We need to kick the
1050 * transfer here in that situation, otherwise we will be NAKing
1051 * forever. If we get XferNotReady before gadget driver has a
1052 * chance to queue a request, we will ACK the IRQ but won't be
1053 * able to receive the data until the next request is queued.
1054 * The following code is handling exactly that.
1055 *
1056 */
1057 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1058 /*
1059 * If xfernotready is already elapsed and it is a case
1060 * of isoc transfer, then issue END TRANSFER, so that
1061 * you can receive xfernotready again and can have
1062 * notion of current microframe.
1063 */
1064 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1065 if (list_empty(&dep->req_queued)) {
1066 dwc3_stop_active_transfer(dwc, dep->number, true);
1067 dep->flags = DWC3_EP_ENABLED;
1068 }
1069 return 0;
1070 }
1071
1072 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1073 if (ret && ret != -EBUSY)
1074 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1075 dep->name);
1076 return ret;
1077 }
1078
1079 /*
1080 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1081 * kick the transfer here after queuing a request, otherwise the
1082 * core may not see the modified TRB(s).
1083 */
1084 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1085 (dep->flags & DWC3_EP_BUSY) &&
1086 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1087 WARN_ON_ONCE(!dep->resource_index);
1088 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1089 false);
1090 if (ret && ret != -EBUSY)
1091 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1092 dep->name);
1093 return ret;
1094 }
1095
1096 /*
1097 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1098 * right away, otherwise host will not know we have streams to be
1099 * handled.
1100 */
1101 if (dep->stream_capable) {
1102 int ret;
1103
1104 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1105 if (ret && ret != -EBUSY) {
1106 struct dwc3 *dwc = dep->dwc;
1107
1108 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1109 dep->name);
1110 }
1111 }
1112
1113 return 0;
1114}
1115
1116static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1117 gfp_t gfp_flags)
1118{
1119 struct dwc3_request *req = to_dwc3_request(request);
1120 struct dwc3_ep *dep = to_dwc3_ep(ep);
1121 struct dwc3 *dwc = dep->dwc;
1122
1123 unsigned long flags;
1124
1125 int ret;
1126
1127 spin_lock_irqsave(&dwc->lock, flags);
1128 if (!dep->endpoint.desc) {
1129 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1130 request, ep->name);
1131 ret = -ESHUTDOWN;
1132 goto out;
1133 }
1134
1135 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1136 request, req->dep->name)) {
1137 ret = -EINVAL;
1138 goto out;
1139 }
1140
1141 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1142 request, ep->name, request->length);
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +05301143
1144 ret = __dwc3_gadget_ep_queue(dep, req);
1145
1146out:
1147 spin_unlock_irqrestore(&dwc->lock, flags);
1148
1149 return ret;
1150}
1151
1152static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1153 struct usb_request *request)
1154{
1155 struct dwc3_request *req = to_dwc3_request(request);
1156 struct dwc3_request *r = NULL;
1157
1158 struct dwc3_ep *dep = to_dwc3_ep(ep);
1159 struct dwc3 *dwc = dep->dwc;
1160
1161 unsigned long flags;
1162 int ret = 0;
1163
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +05301164 spin_lock_irqsave(&dwc->lock, flags);
1165
1166 list_for_each_entry(r, &dep->request_list, list) {
1167 if (r == req)
1168 break;
1169 }
1170
1171 if (r != req) {
1172 list_for_each_entry(r, &dep->req_queued, list) {
1173 if (r == req)
1174 break;
1175 }
1176 if (r == req) {
1177 /* wait until it is processed */
1178 dwc3_stop_active_transfer(dwc, dep->number, true);
1179 goto out1;
1180 }
1181 dev_err(dwc->dev, "request %p was not queued to %s\n",
1182 request, ep->name);
1183 ret = -EINVAL;
1184 goto out0;
1185 }
1186
1187out1:
1188 /* giveback the request */
1189 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1190
1191out0:
1192 spin_unlock_irqrestore(&dwc->lock, flags);
1193
1194 return ret;
1195}
1196
1197int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1198{
1199 struct dwc3_gadget_ep_cmd_params params;
1200 struct dwc3 *dwc = dep->dwc;
1201 int ret;
1202
1203 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1204 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1205 return -EINVAL;
1206 }
1207
1208 memset(&params, 0x00, sizeof(params));
1209
1210 if (value) {
1211 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1212 (!list_empty(&dep->req_queued) ||
1213 !list_empty(&dep->request_list)))) {
1214 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1215 dep->name);
1216 return -EAGAIN;
1217 }
1218
1219 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1220 DWC3_DEPCMD_SETSTALL, &params);
1221 if (ret)
1222 dev_err(dwc->dev, "failed to set STALL on %s\n",
1223 dep->name);
1224 else
1225 dep->flags |= DWC3_EP_STALL;
1226 } else {
1227 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1228 DWC3_DEPCMD_CLEARSTALL, &params);
1229 if (ret)
1230 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1231 dep->name);
1232 else
1233 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1234 }
1235
1236 return ret;
1237}
1238
1239static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1240{
1241 struct dwc3_ep *dep = to_dwc3_ep(ep);
1242 struct dwc3 *dwc = dep->dwc;
1243
1244 unsigned long flags;
1245
1246 int ret;
1247
1248 spin_lock_irqsave(&dwc->lock, flags);
1249 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1250 spin_unlock_irqrestore(&dwc->lock, flags);
1251
1252 return ret;
1253}
1254
1255static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1256{
1257 struct dwc3_ep *dep = to_dwc3_ep(ep);
1258 struct dwc3 *dwc = dep->dwc;
1259 unsigned long flags;
1260 int ret;
1261
1262 spin_lock_irqsave(&dwc->lock, flags);
1263 dep->flags |= DWC3_EP_WEDGE;
1264
1265 if (dep->number == 0 || dep->number == 1)
1266 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1267 else
1268 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1269 spin_unlock_irqrestore(&dwc->lock, flags);
1270
1271 return ret;
1272}
1273
1274/* -------------------------------------------------------------------------- */
1275
1276static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1277 .bLength = USB_DT_ENDPOINT_SIZE,
1278 .bDescriptorType = USB_DT_ENDPOINT,
1279 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1280};
1281
1282static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1283 .enable = dwc3_gadget_ep0_enable,
1284 .disable = dwc3_gadget_ep0_disable,
1285 .alloc_request = dwc3_gadget_ep_alloc_request,
1286 .free_request = dwc3_gadget_ep_free_request,
1287 .queue = dwc3_gadget_ep0_queue,
1288 .dequeue = dwc3_gadget_ep_dequeue,
1289 .set_halt = dwc3_gadget_ep0_set_halt,
1290 .set_wedge = dwc3_gadget_ep_set_wedge,
1291};
1292
1293static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1294 .enable = dwc3_gadget_ep_enable,
1295 .disable = dwc3_gadget_ep_disable,
1296 .alloc_request = dwc3_gadget_ep_alloc_request,
1297 .free_request = dwc3_gadget_ep_free_request,
1298 .queue = dwc3_gadget_ep_queue,
1299 .dequeue = dwc3_gadget_ep_dequeue,
1300 .set_halt = dwc3_gadget_ep_set_halt,
1301 .set_wedge = dwc3_gadget_ep_set_wedge,
1302};
1303
1304/* -------------------------------------------------------------------------- */
1305
1306static int dwc3_gadget_get_frame(struct usb_gadget *g)
1307{
1308 struct dwc3 *dwc = gadget_to_dwc(g);
1309 u32 reg;
1310
1311 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1312 return DWC3_DSTS_SOFFN(reg);
1313}
1314
1315static int dwc3_gadget_wakeup(struct usb_gadget *g)
1316{
1317 struct dwc3 *dwc = gadget_to_dwc(g);
1318
1319 unsigned long timeout;
1320 unsigned long flags;
1321
1322 u32 reg;
1323
1324 int ret = 0;
1325
1326 u8 link_state;
1327 u8 speed;
1328
1329 spin_lock_irqsave(&dwc->lock, flags);
1330
1331 /*
1332 * According to the Databook Remote wakeup request should
1333 * be issued only when the device is in early suspend state.
1334 *
1335 * We can check that via USB Link State bits in DSTS register.
1336 */
1337 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1338
1339 speed = reg & DWC3_DSTS_CONNECTSPD;
1340 if (speed == DWC3_DSTS_SUPERSPEED) {
1341 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1342 ret = -EINVAL;
1343 goto out;
1344 }
1345
1346 link_state = DWC3_DSTS_USBLNKST(reg);
1347
1348 switch (link_state) {
1349 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1350 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1351 break;
1352 default:
1353 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1354 link_state);
1355 ret = -EINVAL;
1356 goto out;
1357 }
1358
1359 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1360 if (ret < 0) {
1361 dev_err(dwc->dev, "failed to put link in Recovery\n");
1362 goto out;
1363 }
1364
1365 /* Recent versions do this automatically */
1366 if (dwc->revision < DWC3_REVISION_194A) {
1367 /* write zeroes to Link Change Request */
1368 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1369 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1370 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1371 }
1372
1373 /* poll until Link State changes to ON */
1374 timeout = jiffies + msecs_to_jiffies(100);
1375
1376 while (!time_after(jiffies, timeout)) {
1377 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1378
1379 /* in HS, means ON */
1380 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1381 break;
1382 }
1383
1384 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1385 dev_err(dwc->dev, "failed to send remote wakeup\n");
1386 ret = -EINVAL;
1387 }
1388
1389out:
1390 spin_unlock_irqrestore(&dwc->lock, flags);
1391
1392 return ret;
1393}
1394
1395static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1396 int is_selfpowered)
1397{
1398 struct dwc3 *dwc = gadget_to_dwc(g);
1399 unsigned long flags;
1400
1401 spin_lock_irqsave(&dwc->lock, flags);
1402 dwc->is_selfpowered = !!is_selfpowered;
1403 spin_unlock_irqrestore(&dwc->lock, flags);
1404
1405 return 0;
1406}
1407
1408static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1409{
1410 u32 reg;
1411 u32 timeout = 500;
1412
1413 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1414 if (is_on) {
1415 if (dwc->revision <= DWC3_REVISION_187A) {
1416 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1417 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1418 }
1419
1420 if (dwc->revision >= DWC3_REVISION_194A)
1421 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1422 reg |= DWC3_DCTL_RUN_STOP;
1423
1424 if (dwc->has_hibernation)
1425 reg |= DWC3_DCTL_KEEP_CONNECT;
1426
1427 dwc->pullups_connected = true;
1428 } else {
1429 reg &= ~DWC3_DCTL_RUN_STOP;
1430
1431 if (dwc->has_hibernation && !suspend)
1432 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1433
1434 dwc->pullups_connected = false;
1435 }
1436
1437 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1438
1439 do {
1440 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1441 if (is_on) {
1442 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1443 break;
1444 } else {
1445 if (reg & DWC3_DSTS_DEVCTRLHLT)
1446 break;
1447 }
1448 timeout--;
1449 if (!timeout)
1450 return -ETIMEDOUT;
1451 udelay(1);
1452 } while (1);
1453
1454 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1455 dwc->gadget_driver
1456 ? dwc->gadget_driver->function : "no-function",
1457 is_on ? "connect" : "disconnect");
1458
1459 return 0;
1460}
1461
1462static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1463{
1464 struct dwc3 *dwc = gadget_to_dwc(g);
1465 unsigned long flags;
1466 int ret;
1467
1468 is_on = !!is_on;
1469
1470 spin_lock_irqsave(&dwc->lock, flags);
1471 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1472 spin_unlock_irqrestore(&dwc->lock, flags);
1473
1474 return ret;
1475}
1476
1477static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1478{
1479 u32 reg;
1480
1481 /* Enable all but Start and End of Frame IRQs */
1482 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1483 DWC3_DEVTEN_EVNTOVERFLOWEN |
1484 DWC3_DEVTEN_CMDCMPLTEN |
1485 DWC3_DEVTEN_ERRTICERREN |
1486 DWC3_DEVTEN_WKUPEVTEN |
1487 DWC3_DEVTEN_ULSTCNGEN |
1488 DWC3_DEVTEN_CONNECTDONEEN |
1489 DWC3_DEVTEN_USBRSTEN |
1490 DWC3_DEVTEN_DISCONNEVTEN);
1491
1492 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1493}
1494
1495static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1496{
1497 /* mask all interrupts */
1498 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1499}
1500
1501static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1502static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1503
1504static int dwc3_gadget_start(struct usb_gadget *g,
1505 struct usb_gadget_driver *driver)
1506{
1507 struct dwc3 *dwc = gadget_to_dwc(g);
1508 struct dwc3_ep *dep;
1509 unsigned long flags;
1510 int ret = 0;
1511 int irq;
1512 u32 reg;
1513
1514 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1515 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1516 IRQF_SHARED, "dwc3", dwc);
1517 if (ret) {
1518 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1519 irq, ret);
1520 goto err0;
1521 }
1522
1523 spin_lock_irqsave(&dwc->lock, flags);
1524
1525 if (dwc->gadget_driver) {
1526 dev_err(dwc->dev, "%s is already bound to %s\n",
1527 dwc->gadget.name,
1528 dwc->gadget_driver->driver.name);
1529 ret = -EBUSY;
1530 goto err1;
1531 }
1532
1533 dwc->gadget_driver = driver;
1534
1535 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1536 reg &= ~(DWC3_DCFG_SPEED_MASK);
1537
1538 /**
1539 * WORKAROUND: DWC3 revision < 2.20a have an issue
1540 * which would cause metastability state on Run/Stop
1541 * bit if we try to force the IP to USB2-only mode.
1542 *
1543 * Because of that, we cannot configure the IP to any
1544 * speed other than the SuperSpeed
1545 *
1546 * Refers to:
1547 *
1548 * STAR#9000525659: Clock Domain Crossing on DCTL in
1549 * USB 2.0 Mode
1550 */
1551 if (dwc->revision < DWC3_REVISION_220A) {
1552 reg |= DWC3_DCFG_SUPERSPEED;
1553 } else {
1554 switch (dwc->maximum_speed) {
1555 case USB_SPEED_LOW:
1556 reg |= DWC3_DSTS_LOWSPEED;
1557 break;
1558 case USB_SPEED_FULL:
1559 reg |= DWC3_DSTS_FULLSPEED1;
1560 break;
1561 case USB_SPEED_HIGH:
1562 reg |= DWC3_DSTS_HIGHSPEED;
1563 break;
1564 case USB_SPEED_SUPER: /* FALLTHROUGH */
1565 case USB_SPEED_UNKNOWN: /* FALTHROUGH */
1566 default:
1567 reg |= DWC3_DSTS_SUPERSPEED;
1568 }
1569 }
1570 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1571
1572 dwc->start_config_issued = false;
1573
1574 /* Start with SuperSpeed Default */
1575 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1576
1577 dep = dwc->eps[0];
1578 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1579 false);
1580 if (ret) {
1581 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1582 goto err2;
1583 }
1584
1585 dep = dwc->eps[1];
1586 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1587 false);
1588 if (ret) {
1589 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1590 goto err3;
1591 }
1592
1593 /* begin to receive SETUP packets */
1594 dwc->ep0state = EP0_SETUP_PHASE;
1595 dwc3_ep0_out_start(dwc);
1596
1597 dwc3_gadget_enable_irq(dwc);
1598
1599 spin_unlock_irqrestore(&dwc->lock, flags);
1600
1601 return 0;
1602
1603err3:
1604 __dwc3_gadget_ep_disable(dwc->eps[0]);
1605
1606err2:
1607 dwc->gadget_driver = NULL;
1608
1609err1:
1610 spin_unlock_irqrestore(&dwc->lock, flags);
1611
1612 free_irq(irq, dwc);
1613
1614err0:
1615 return ret;
1616}
1617
1618static int dwc3_gadget_stop(struct usb_gadget *g)
1619{
1620 struct dwc3 *dwc = gadget_to_dwc(g);
1621 unsigned long flags;
1622 int irq;
1623
1624 spin_lock_irqsave(&dwc->lock, flags);
1625
1626 dwc3_gadget_disable_irq(dwc);
1627 __dwc3_gadget_ep_disable(dwc->eps[0]);
1628 __dwc3_gadget_ep_disable(dwc->eps[1]);
1629
1630 dwc->gadget_driver = NULL;
1631
1632 spin_unlock_irqrestore(&dwc->lock, flags);
1633
1634 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1635 free_irq(irq, dwc);
1636
1637 return 0;
1638}
1639
1640static const struct usb_gadget_ops dwc3_gadget_ops = {
1641 .get_frame = dwc3_gadget_get_frame,
1642 .wakeup = dwc3_gadget_wakeup,
1643 .set_selfpowered = dwc3_gadget_set_selfpowered,
1644 .pullup = dwc3_gadget_pullup,
1645 .udc_start = dwc3_gadget_start,
1646 .udc_stop = dwc3_gadget_stop,
1647};
1648
1649/* -------------------------------------------------------------------------- */
1650
1651static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1652 u8 num, u32 direction)
1653{
1654 struct dwc3_ep *dep;
1655 u8 i;
1656
1657 for (i = 0; i < num; i++) {
1658 u8 epnum = (i << 1) | (!!direction);
1659
1660 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1661 if (!dep)
1662 return -ENOMEM;
1663
1664 dep->dwc = dwc;
1665 dep->number = epnum;
1666 dep->direction = !!direction;
1667 dwc->eps[epnum] = dep;
1668
1669 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1670 (epnum & 1) ? "in" : "out");
1671
1672 dep->endpoint.name = dep->name;
1673
1674 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1675
1676 if (epnum == 0 || epnum == 1) {
1677 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1678 dep->endpoint.maxburst = 1;
1679 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1680 if (!epnum)
1681 dwc->gadget.ep0 = &dep->endpoint;
1682 } else {
1683 int ret;
1684
1685 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1686 dep->endpoint.max_streams = 15;
1687 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1688 list_add_tail(&dep->endpoint.ep_list,
1689 &dwc->gadget.ep_list);
1690
1691 ret = dwc3_alloc_trb_pool(dep);
1692 if (ret)
1693 return ret;
1694 }
1695
1696 INIT_LIST_HEAD(&dep->request_list);
1697 INIT_LIST_HEAD(&dep->req_queued);
1698 }
1699
1700 return 0;
1701}
1702
1703static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1704{
1705 int ret;
1706
1707 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1708
1709 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1710 if (ret < 0) {
1711 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1712 return ret;
1713 }
1714
1715 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1716 if (ret < 0) {
1717 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1718 return ret;
1719 }
1720
1721 return 0;
1722}
1723
1724static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1725{
1726 struct dwc3_ep *dep;
1727 u8 epnum;
1728
1729 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1730 dep = dwc->eps[epnum];
1731 if (!dep)
1732 continue;
1733 /*
1734 * Physical endpoints 0 and 1 are special; they form the
1735 * bi-directional USB endpoint 0.
1736 *
1737 * For those two physical endpoints, we don't allocate a TRB
1738 * pool nor do we add them the endpoints list. Due to that, we
1739 * shouldn't do these two operations otherwise we would end up
1740 * with all sorts of bugs when removing dwc3.ko.
1741 */
1742 if (epnum != 0 && epnum != 1) {
1743 dwc3_free_trb_pool(dep);
1744 list_del(&dep->endpoint.ep_list);
1745 }
1746
1747 kfree(dep);
1748 }
1749}
1750
1751/* -------------------------------------------------------------------------- */
1752
1753static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1754 struct dwc3_request *req, struct dwc3_trb *trb,
1755 const struct dwc3_event_depevt *event, int status)
1756{
1757 unsigned int count;
1758 unsigned int s_pkt = 0;
1759 unsigned int trb_status;
1760
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +05301761 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1762 /*
1763 * We continue despite the error. There is not much we
1764 * can do. If we don't clean it up we loop forever. If
1765 * we skip the TRB then it gets overwritten after a
1766 * while since we use them in a ring buffer. A BUG()
1767 * would help. Lets hope that if this occurs, someone
1768 * fixes the root cause instead of looking away :)
1769 */
1770 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1771 dep->name, trb);
1772 count = trb->size & DWC3_TRB_SIZE_MASK;
1773
1774 if (dep->direction) {
1775 if (count) {
1776 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1777 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1778 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1779 dep->name);
1780 /*
1781 * If missed isoc occurred and there is
1782 * no request queued then issue END
1783 * TRANSFER, so that core generates
1784 * next xfernotready and we will issue
1785 * a fresh START TRANSFER.
1786 * If there are still queued request
1787 * then wait, do not issue either END
1788 * or UPDATE TRANSFER, just attach next
1789 * request in request_list during
1790 * giveback.If any future queued request
1791 * is successfully transferred then we
1792 * will issue UPDATE TRANSFER for all
1793 * request in the request_list.
1794 */
1795 dep->flags |= DWC3_EP_MISSED_ISOC;
1796 } else {
1797 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1798 dep->name);
1799 status = -ECONNRESET;
1800 }
1801 } else {
1802 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1803 }
1804 } else {
1805 if (count && (event->status & DEPEVT_STATUS_SHORT))
1806 s_pkt = 1;
1807 }
1808
1809 /*
1810 * We assume here we will always receive the entire data block
1811 * which we should receive. Meaning, if we program RX to
1812 * receive 4K but we receive only 2K, we assume that's all we
1813 * should receive and we simply bounce the request back to the
1814 * gadget driver for further processing.
1815 */
1816 req->request.actual += req->request.length - count;
1817 if (s_pkt)
1818 return 1;
1819 if ((event->status & DEPEVT_STATUS_LST) &&
1820 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1821 DWC3_TRB_CTRL_HWO)))
1822 return 1;
1823 if ((event->status & DEPEVT_STATUS_IOC) &&
1824 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1825 return 1;
1826 return 0;
1827}
1828
1829static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1830 const struct dwc3_event_depevt *event, int status)
1831{
1832 struct dwc3_request *req;
1833 struct dwc3_trb *trb;
1834 unsigned int slot;
1835 unsigned int i;
1836 int ret;
1837
1838 do {
1839 req = next_request(&dep->req_queued);
1840 if (!req) {
1841 WARN_ON_ONCE(1);
1842 return 1;
1843 }
1844 i = 0;
1845 do {
1846 slot = req->start_slot + i;
1847 if ((slot == DWC3_TRB_NUM - 1) &&
1848 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1849 slot++;
1850 slot %= DWC3_TRB_NUM;
1851 trb = &dep->trb_pool[slot];
1852
1853 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1854 event, status);
1855 if (ret)
1856 break;
1857 }while (++i < req->request.num_mapped_sgs);
1858
1859 dwc3_gadget_giveback(dep, req, status);
1860
1861 if (ret)
1862 break;
1863 } while (1);
1864
1865 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1866 list_empty(&dep->req_queued)) {
1867 if (list_empty(&dep->request_list)) {
1868 /*
1869 * If there is no entry in request list then do
1870 * not issue END TRANSFER now. Just set PENDING
1871 * flag, so that END TRANSFER is issued when an
1872 * entry is added into request list.
1873 */
1874 dep->flags = DWC3_EP_PENDING_REQUEST;
1875 } else {
1876 dwc3_stop_active_transfer(dwc, dep->number, true);
1877 dep->flags = DWC3_EP_ENABLED;
1878 }
1879 return 1;
1880 }
1881
1882 return 1;
1883}
1884
1885static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1886 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1887{
1888 unsigned status = 0;
1889 int clean_busy;
1890
1891 if (event->status & DEPEVT_STATUS_BUSERR)
1892 status = -ECONNRESET;
1893
1894 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1895 if (clean_busy)
1896 dep->flags &= ~DWC3_EP_BUSY;
1897
1898 /*
1899 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1900 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1901 */
1902 if (dwc->revision < DWC3_REVISION_183A) {
1903 u32 reg;
1904 int i;
1905
1906 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1907 dep = dwc->eps[i];
1908
1909 if (!(dep->flags & DWC3_EP_ENABLED))
1910 continue;
1911
1912 if (!list_empty(&dep->req_queued))
1913 return;
1914 }
1915
1916 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1917 reg |= dwc->u1u2;
1918 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1919
1920 dwc->u1u2 = 0;
1921 }
1922}
1923
1924static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1925 const struct dwc3_event_depevt *event)
1926{
1927 struct dwc3_ep *dep;
1928 u8 epnum = event->endpoint_number;
1929
1930 dep = dwc->eps[epnum];
1931
1932 if (!(dep->flags & DWC3_EP_ENABLED))
1933 return;
1934
1935 if (epnum == 0 || epnum == 1) {
1936 dwc3_ep0_interrupt(dwc, event);
1937 return;
1938 }
1939
1940 switch (event->endpoint_event) {
1941 case DWC3_DEPEVT_XFERCOMPLETE:
1942 dep->resource_index = 0;
1943
1944 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1945 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1946 dep->name);
1947 return;
1948 }
1949
1950 dwc3_endpoint_transfer_complete(dwc, dep, event);
1951 break;
1952 case DWC3_DEPEVT_XFERINPROGRESS:
1953 dwc3_endpoint_transfer_complete(dwc, dep, event);
1954 break;
1955 case DWC3_DEPEVT_XFERNOTREADY:
1956 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1957 dwc3_gadget_start_isoc(dwc, dep, event);
1958 } else {
1959 int ret;
1960
1961 dev_vdbg(dwc->dev, "%s: reason %s\n",
1962 dep->name, event->status &
1963 DEPEVT_STATUS_TRANSFER_ACTIVE
1964 ? "Transfer Active"
1965 : "Transfer Not Active");
1966
1967 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1968 if (!ret || ret == -EBUSY)
1969 return;
1970
1971 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1972 dep->name);
1973 }
1974
1975 break;
1976 case DWC3_DEPEVT_STREAMEVT:
1977 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1978 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1979 dep->name);
1980 return;
1981 }
1982
1983 switch (event->status) {
1984 case DEPEVT_STREAMEVT_FOUND:
1985 dev_vdbg(dwc->dev, "Stream %d found and started\n",
1986 event->parameters);
1987
1988 break;
1989 case DEPEVT_STREAMEVT_NOTFOUND:
1990 /* FALLTHROUGH */
1991 default:
1992 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1993 }
1994 break;
1995 case DWC3_DEPEVT_RXTXFIFOEVT:
1996 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1997 break;
1998 case DWC3_DEPEVT_EPCMDCMPLT:
1999 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
2000 break;
2001 }
2002}
2003
2004static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2005{
2006 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2007 spin_unlock(&dwc->lock);
2008 dwc->gadget_driver->disconnect(&dwc->gadget);
2009 spin_lock(&dwc->lock);
2010 }
2011}
2012
2013static void dwc3_suspend_gadget(struct dwc3 *dwc)
2014{
2015 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2016 spin_unlock(&dwc->lock);
2017 dwc->gadget_driver->suspend(&dwc->gadget);
2018 spin_lock(&dwc->lock);
2019 }
2020}
2021
2022static void dwc3_resume_gadget(struct dwc3 *dwc)
2023{
2024 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2025 spin_unlock(&dwc->lock);
2026 dwc->gadget_driver->resume(&dwc->gadget);
2027 }
2028}
2029
2030static void dwc3_reset_gadget(struct dwc3 *dwc)
2031{
2032 if (!dwc->gadget_driver)
2033 return;
2034
2035 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2036 spin_unlock(&dwc->lock);
2037 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2038 spin_lock(&dwc->lock);
2039 }
2040}
2041
2042static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2043{
2044 struct dwc3_ep *dep;
2045 struct dwc3_gadget_ep_cmd_params params;
2046 u32 cmd;
2047 int ret;
2048
2049 dep = dwc->eps[epnum];
2050
2051 if (!dep->resource_index)
2052 return;
2053
2054 /*
2055 * NOTICE: We are violating what the Databook says about the
2056 * EndTransfer command. Ideally we would _always_ wait for the
2057 * EndTransfer Command Completion IRQ, but that's causing too
2058 * much trouble synchronizing between us and gadget driver.
2059 *
2060 * We have discussed this with the IP Provider and it was
2061 * suggested to giveback all requests here, but give HW some
2062 * extra time to synchronize with the interconnect. We're using
2063 * an arbitraty 100us delay for that.
2064 *
2065 * Note also that a similar handling was tested by Synopsys
2066 * (thanks a lot Paul) and nothing bad has come out of it.
2067 * In short, what we're doing is:
2068 *
2069 * - Issue EndTransfer WITH CMDIOC bit set
2070 * - Wait 100us
2071 */
2072
2073 cmd = DWC3_DEPCMD_ENDTRANSFER;
2074 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2075 cmd |= DWC3_DEPCMD_CMDIOC;
2076 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2077 memset(&params, 0, sizeof(params));
2078 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2079 WARN_ON_ONCE(ret);
2080 dep->resource_index = 0;
2081 dep->flags &= ~DWC3_EP_BUSY;
2082 udelay(100);
2083}
2084
2085static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2086{
2087 u32 epnum;
2088
2089 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2090 struct dwc3_ep *dep;
2091
2092 dep = dwc->eps[epnum];
2093 if (!dep)
2094 continue;
2095
2096 if (!(dep->flags & DWC3_EP_ENABLED))
2097 continue;
2098
2099 dwc3_remove_requests(dwc, dep);
2100 }
2101}
2102
2103static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2104{
2105 u32 epnum;
2106
2107 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2108 struct dwc3_ep *dep;
2109 struct dwc3_gadget_ep_cmd_params params;
2110 int ret;
2111
2112 dep = dwc->eps[epnum];
2113 if (!dep)
2114 continue;
2115
2116 if (!(dep->flags & DWC3_EP_STALL))
2117 continue;
2118
2119 dep->flags &= ~DWC3_EP_STALL;
2120
2121 memset(&params, 0, sizeof(params));
2122 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2123 DWC3_DEPCMD_CLEARSTALL, &params);
2124 WARN_ON_ONCE(ret);
2125 }
2126}
2127
2128static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2129{
2130 int reg;
2131
2132 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2133 reg &= ~DWC3_DCTL_INITU1ENA;
2134 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2135
2136 reg &= ~DWC3_DCTL_INITU2ENA;
2137 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2138
2139 dwc3_disconnect_gadget(dwc);
2140 dwc->start_config_issued = false;
2141
2142 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2143 dwc->setup_packet_pending = false;
2144 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2145}
2146
2147static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2148{
2149 u32 reg;
2150
2151 /*
2152 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2153 * would cause a missing Disconnect Event if there's a
2154 * pending Setup Packet in the FIFO.
2155 *
2156 * There's no suggested workaround on the official Bug
2157 * report, which states that "unless the driver/application
2158 * is doing any special handling of a disconnect event,
2159 * there is no functional issue".
2160 *
2161 * Unfortunately, it turns out that we _do_ some special
2162 * handling of a disconnect event, namely complete all
2163 * pending transfers, notify gadget driver of the
2164 * disconnection, and so on.
2165 *
2166 * Our suggested workaround is to follow the Disconnect
2167 * Event steps here, instead, based on a setup_packet_pending
2168 * flag. Such flag gets set whenever we have a XferNotReady
2169 * event on EP0 and gets cleared on XferComplete for the
2170 * same endpoint.
2171 *
2172 * Refers to:
2173 *
2174 * STAR#9000466709: RTL: Device : Disconnect event not
2175 * generated if setup packet pending in FIFO
2176 */
2177 if (dwc->revision < DWC3_REVISION_188A) {
2178 if (dwc->setup_packet_pending)
2179 dwc3_gadget_disconnect_interrupt(dwc);
2180 }
2181
2182 dwc3_reset_gadget(dwc);
2183
2184 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2185 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2186 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2187 dwc->test_mode = false;
2188
2189 dwc3_stop_active_transfers(dwc);
2190 dwc3_clear_stall_all_ep(dwc);
2191 dwc->start_config_issued = false;
2192
2193 /* Reset device address to zero */
2194 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2195 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2196 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2197}
2198
2199static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2200{
2201 u32 reg;
2202 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2203
2204 /*
2205 * We change the clock only at SS but I dunno why I would want to do
2206 * this. Maybe it becomes part of the power saving plan.
2207 */
2208
2209 if (speed != DWC3_DSTS_SUPERSPEED)
2210 return;
2211
2212 /*
2213 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2214 * each time on Connect Done.
2215 */
2216 if (!usb30_clock)
2217 return;
2218
2219 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2220 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2221 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2222}
2223
2224static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2225{
2226 struct dwc3_ep *dep;
2227 int ret;
2228 u32 reg;
2229 u8 speed;
2230
2231 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2232 speed = reg & DWC3_DSTS_CONNECTSPD;
2233 dwc->speed = speed;
2234
2235 dwc3_update_ram_clk_sel(dwc, speed);
2236
2237 switch (speed) {
2238 case DWC3_DCFG_SUPERSPEED:
2239 /*
2240 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2241 * would cause a missing USB3 Reset event.
2242 *
2243 * In such situations, we should force a USB3 Reset
2244 * event by calling our dwc3_gadget_reset_interrupt()
2245 * routine.
2246 *
2247 * Refers to:
2248 *
2249 * STAR#9000483510: RTL: SS : USB3 reset event may
2250 * not be generated always when the link enters poll
2251 */
2252 if (dwc->revision < DWC3_REVISION_190A)
2253 dwc3_gadget_reset_interrupt(dwc);
2254
2255 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2256 dwc->gadget.ep0->maxpacket = 512;
2257 dwc->gadget.speed = USB_SPEED_SUPER;
2258 break;
2259 case DWC3_DCFG_HIGHSPEED:
2260 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2261 dwc->gadget.ep0->maxpacket = 64;
2262 dwc->gadget.speed = USB_SPEED_HIGH;
2263 break;
2264 case DWC3_DCFG_FULLSPEED2:
2265 case DWC3_DCFG_FULLSPEED1:
2266 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2267 dwc->gadget.ep0->maxpacket = 64;
2268 dwc->gadget.speed = USB_SPEED_FULL;
2269 break;
2270 case DWC3_DCFG_LOWSPEED:
2271 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2272 dwc->gadget.ep0->maxpacket = 8;
2273 dwc->gadget.speed = USB_SPEED_LOW;
2274 break;
2275 }
2276
2277 /* Enable USB2 LPM Capability */
2278
2279 if ((dwc->revision > DWC3_REVISION_194A)
2280 && (speed != DWC3_DCFG_SUPERSPEED)) {
2281 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2282 reg |= DWC3_DCFG_LPM_CAP;
2283 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2284
2285 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2286 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2287
2288 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2289
2290 /*
2291 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled and
2292 * DCFG.LPMCap is set, core responses with an ACK and the
2293 * BESL value in the LPM token is less than or equal to LPM
2294 * NYET threshold.
2295 */
2296 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2297 && dwc->has_lpm_erratum,
2298 "LPM Erratum not available on dwc3 revisisions < 2.40a\n");
2299
2300 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2301 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2302
2303 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2304 } else {
2305 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2306 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2307 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2308 }
2309
2310 dep = dwc->eps[0];
2311 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2312 false);
2313 if (ret) {
2314 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2315 return;
2316 }
2317
2318 dep = dwc->eps[1];
2319 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2320 false);
2321 if (ret) {
2322 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2323 return;
2324 }
2325
2326 /*
2327 * Configure PHY via GUSB3PIPECTLn if required.
2328 *
2329 * Update GTXFIFOSIZn
2330 *
2331 * In both cases reset values should be sufficient.
2332 */
2333}
2334
2335static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2336{
2337 /*
2338 * TODO take core out of low power mode when that's
2339 * implemented.
2340 */
2341
2342 dwc->gadget_driver->resume(&dwc->gadget);
2343}
2344
2345static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2346 unsigned int evtinfo)
2347{
2348 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2349 unsigned int pwropt;
2350
2351 /*
2352 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2353 * Hibernation mode enabled which would show up when device detects
2354 * host-initiated U3 exit.
2355 *
2356 * In that case, device will generate a Link State Change Interrupt
2357 * from U3 to RESUME which is only necessary if Hibernation is
2358 * configured in.
2359 *
2360 * There are no functional changes due to such spurious event and we
2361 * just need to ignore it.
2362 *
2363 * Refers to:
2364 *
2365 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2366 * operational mode
2367 */
2368 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2369 if ((dwc->revision < DWC3_REVISION_250A) &&
2370 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2371 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2372 (next == DWC3_LINK_STATE_RESUME)) {
2373 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2374 return;
2375 }
2376 }
2377
2378 /*
2379 * WORKAROUND: DWC3 Revisions <1.83a have an issue which, depending
2380 * on the link partner, the USB session might do multiple entry/exit
2381 * of low power states before a transfer takes place.
2382 *
2383 * Due to this problem, we might experience lower throughput. The
2384 * suggested workaround is to disable DCTL[12:9] bits if we're
2385 * transitioning from U1/U2 to U0 and enable those bits again
2386 * after a transfer completes and there are no pending transfers
2387 * on any of the enabled endpoints.
2388 *
2389 * This is the first half of that workaround.
2390 *
2391 * Refers to:
2392 *
2393 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2394 * core send LGO_Ux entering U0
2395 */
2396 if (dwc->revision < DWC3_REVISION_183A) {
2397 if (next == DWC3_LINK_STATE_U0) {
2398 u32 u1u2;
2399 u32 reg;
2400
2401 switch (dwc->link_state) {
2402 case DWC3_LINK_STATE_U1:
2403 case DWC3_LINK_STATE_U2:
2404 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2405 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2406 | DWC3_DCTL_ACCEPTU2ENA
2407 | DWC3_DCTL_INITU1ENA
2408 | DWC3_DCTL_ACCEPTU1ENA);
2409
2410 if (!dwc->u1u2)
2411 dwc->u1u2 = reg & u1u2;
2412
2413 reg &= ~u1u2;
2414
2415 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2416 break;
2417 default:
2418 /* do nothing */
2419 break;
2420 }
2421 }
2422 }
2423
2424 switch (next) {
2425 case DWC3_LINK_STATE_U1:
2426 if (dwc->speed == USB_SPEED_SUPER)
2427 dwc3_suspend_gadget(dwc);
2428 break;
2429 case DWC3_LINK_STATE_U2:
2430 case DWC3_LINK_STATE_U3:
2431 dwc3_suspend_gadget(dwc);
2432 break;
2433 case DWC3_LINK_STATE_RESUME:
2434 dwc3_resume_gadget(dwc);
2435 break;
2436 default:
2437 /* do nothing */
2438 break;
2439 }
2440
2441 dwc->link_state = next;
2442}
2443
2444static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2445 unsigned int evtinfo)
2446{
2447 unsigned int is_ss = evtinfo & BIT(4);
2448
2449 /**
2450 * WORKAROUND: DWC3 revison 2.20a with hibernation support
2451 * have a known issue which can cause USB CV TD.9.23 to fail
2452 * randomly.
2453 *
2454 * Because of this issue, core could generate bogus hibernation
2455 * events which SW needs to ignore.
2456 *
2457 * Refers to:
2458 *
2459 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2460 * Device Fallback from SuperSpeed
2461 */
2462 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2463 return;
2464
2465 /* enter hibernation here */
2466}
2467
2468static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2469 const struct dwc3_event_devt *event)
2470{
2471 switch (event->type) {
2472 case DWC3_DEVICE_EVENT_DISCONNECT:
2473 dwc3_gadget_disconnect_interrupt(dwc);
2474 break;
2475 case DWC3_DEVICE_EVENT_RESET:
2476 dwc3_gadget_reset_interrupt(dwc);
2477 break;
2478 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2479 dwc3_gadget_conndone_interrupt(dwc);
2480 break;
2481 case DWC3_DEVICE_EVENT_WAKEUP:
2482 dwc3_gadget_wakeup_interrupt(dwc);
2483 break;
2484 case DWC3_DEVICE_EVENT_HIBER_REQ:
2485 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2486 "unexpected hibernation event\n"))
2487 break;
2488
2489 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2490 break;
2491 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2492 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2493 break;
2494 case DWC3_DEVICE_EVENT_EOPF:
2495 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2496 break;
2497 case DWC3_DEVICE_EVENT_SOF:
2498 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2499 break;
2500 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2501 dev_vdbg(dwc->dev, "Erratic Error\n");
2502 break;
2503 case DWC3_DEVICE_EVENT_CMD_CMPL:
2504 dev_vdbg(dwc->dev, "Command Complete\n");
2505 break;
2506 case DWC3_DEVICE_EVENT_OVERFLOW:
2507 dev_vdbg(dwc->dev, "Overflow\n");
2508 break;
2509 default:
2510 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2511 }
2512}
2513
2514static void dwc3_process_event_entry(struct dwc3 *dwc,
2515 const union dwc3_event *event)
2516{
Kishon Vijay Abraham I85d5e702015-02-23 18:39:50 +05302517 /* Endpoint IRQ, handle it and return early */
2518 if (event->type.is_devspec == 0) {
2519 /* depevt */
2520 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2521 }
2522
2523 switch (event->type.type) {
2524 case DWC3_EVENT_TYPE_DEV:
2525 dwc3_gadget_interrupt(dwc, &event->devt);
2526 break;
2527 /* REVISIT what to do with Carkit and I2C events ? */
2528 default:
2529 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2530 }
2531}
2532
2533static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2534{
2535 struct dwc3_event_buffer *evt;
2536 irqreturn_t ret = IRQ_NONE;
2537 int left;
2538 u32 reg;
2539
2540 evt = dwc->ev_buffs[buf];
2541 left = evt->count;
2542
2543 if (!(evt->flags & DWC3_EVENT_PENDING))
2544 return IRQ_NONE;
2545
2546 while (left > 0) {
2547 union dwc3_event event;
2548
2549 event.raw = *(u32 *) (evt->buf + evt->lpos);
2550
2551 dwc3_process_event_entry(dwc, &event);
2552
2553 /*
2554 * FIXME we wrap around correctly to the next entry as
2555 * almost all entries are 4 bytes in size. There is one
2556 * entry which has 12 bytes which is a regular entry
2557 * followed by 8 bytes data. ATM I don't know how
2558 * things are organized if we get next to the a
2559 * boundary so I worry about that once we try to handle
2560 * that.
2561 */
2562 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2563 left -= 4;
2564
2565 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2566 }
2567
2568 evt->count = 0;
2569 evt->flags &= ~DWC3_EVENT_PENDING;
2570 ret = IRQ_HANDLED;
2571
2572 /* Unmask interrupt */
2573 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2574 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2575 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2576
2577 return ret;
2578}
2579
2580static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2581{
2582 struct dwc3 *dwc = _dwc;
2583 unsigned long flags;
2584 irqreturn_t ret = IRQ_NONE;
2585 int i;
2586
2587 spin_lock_irqsave(&dwc->lock, flags);
2588
2589 for (i = 0; i < dwc->num_event_buffers; i++)
2590 ret |= dwc3_process_event_buf(dwc, i);
2591
2592 spin_unlock_irqrestore(&dwc->lock, flags);
2593
2594 return ret;
2595}
2596
2597static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2598{
2599 struct dwc3_event_buffer *evt;
2600 u32 count;
2601 u32 reg;
2602
2603 evt = dwc->ev_buffs[buf];
2604
2605 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2606 count &= DWC3_GEVNTCOUNT_MASK;
2607 if (!count)
2608 return IRQ_NONE;
2609
2610 evt->count = count;
2611 evt->flags |= DWC3_EVENT_PENDING;
2612
2613 /* Mask interrupt */
2614 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2615 reg |= DWC3_GEVNTSIZ_INTMASK;
2616 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2617
2618 return IRQ_WAKE_THREAD;
2619}
2620
2621static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2622{
2623 struct dwc3 *dwc = _dwc;
2624 int i;
2625 irqreturn_t ret = IRQ_NONE;
2626
2627 spin_lock(&dwc->lock);
2628
2629 for (i = 0; i < dwc->num_event_buffers; i++) {
2630 irqreturn_t status;
2631
2632 status = dwc3_check_event_buf(dwc, i);
2633 if (status == IRQ_WAKE_THREAD)
2634 ret = status;
2635 }
2636
2637 spin_unlock(&dwc->lock);
2638
2639 return ret;
2640}
2641
2642/**
2643 * dwc3_gadget_init - Initializes gadget related registers
2644 * @dwc: pointer to our controller context structure
2645 *
2646 * Returns 0 on success otherwise negative errno.
2647 */
2648int dwc3_gadget_init(struct dwc3 *dwc)
2649{
2650 int ret;
2651
2652 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2653 &dwc->ctrl_req_addr, GFP_KERNEL);
2654 if (!dwc->ctrl_req) {
2655 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2656 ret = -ENOMEM;
2657 goto err0;
2658 }
2659
2660 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2661 &dwc->ep0_trb_addr, GFP_KERNEL);
2662 if (!dwc->ep0_trb) {
2663 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2664 ret = -ENOMEM;
2665 goto err1;
2666 }
2667
2668 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2669 if (!dwc->setup_buf) {
2670 ret = -ENOMEM;
2671 goto err2;
2672 }
2673
2674 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2675 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2676 GFP_KERNEL);
2677 if (!dwc->ep0_bounce) {
2678 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2679 ret = -ENOMEM;
2680 goto err3;
2681 }
2682
2683 dwc->gadget.ops = &dwc3_gadget_ops;
2684 dwc->gadget.max_speed = USB_SPEED_SUPER;
2685 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2686 dwc->gadget.sg_supported = true;
2687 dwc->gadget.name = "dwc3-gadget";
2688
2689 /*
2690 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2691 * on ep out.
2692 */
2693 dwc->gadget.quirk_ep_out_aligned_size = true;
2694
2695 /*
2696 * REVISIT: Here we should clear all pending IRQs to be
2697 * sure we're starting from a well known location.
2698 */
2699
2700 ret = dwc3_gadget_init_endpoints(dwc);
2701 if (ret)
2702 goto err4;
2703
2704 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2705 if (ret) {
2706 dev_err(dwc->dev, "failed to register udc\n");
2707 goto err4;
2708 }
2709
2710 return 0;
2711
2712err4:
2713 dwc3_gadget_free_endpoints(dwc);
2714 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2715 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2716
2717err3:
2718 kfree(dwc->setup_buf);
2719
2720err2:
2721 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2722 dwc->ep0_trb, dwc->ep0_trb_addr);
2723
2724err1:
2725 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2726 dwc->ctrl_req, dwc->ctrl_req_addr);
2727
2728err0:
2729 return ret;
2730}
2731
2732/* -------------------------------------------------------------------------- */
2733
2734void dwc3_gadget_exit(struct dwc3 *dwc)
2735{
2736 usb_del_gadget_udc(&dwc->gadget);
2737
2738 dwc3_gadget_free_endpoints(dwc);
2739
2740 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2741 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2742
2743 kfree(dwc->setup_buf);
2744
2745 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2746 dwc->ep0_trb, dwc->ep0_trb_addr);
2747
2748 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2749 dwc->ctrl_req, dwc->ctrl_req_addr);
2750}