/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Taken from Linux Kernel v3.19-rc1 (drivers/usb/dwc3/gadget.c) and ported
 * to U-Boot.
 *
 * commit 8e74475b0e : usb: dwc3: gadget: use udc-core's reset notifier
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if an invalid Test Selector
 * is passed.
 */
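/*
 * Illustrative note (not part of the original comment), assuming the
 * standard ch9.h selector values TEST_J = 1 ... TEST_FORCE_EN = 5: the
 * selector is placed into the DCTL test-control field by "mode << 1"
 * below, e.g. TEST_PACKET (4) becomes 4 << 1 = 0x08 within
 * DWC3_DCTL_TSTCTRL_MASK.
 */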
45int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
46{
47 u32 reg;
48
49 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
50 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
51
52 switch (mode) {
53 case TEST_J:
54 case TEST_K:
55 case TEST_SE0_NAK:
56 case TEST_PACKET:
57 case TEST_FORCE_EN:
58 reg |= mode << 1;
59 break;
60 default:
61 return -EINVAL;
62 }
63
64 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
65
66 return 0;
67}
68
/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function
 * returns the current link state as read from DSTS (always >= 0).
 */
76int dwc3_gadget_get_link_state(struct dwc3 *dwc)
77{
78 u32 reg;
79
80 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
81
82 return DWC3_DSTS_USBLNKST(reg);
83}
84
85/**
86 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
87 * @dwc: pointer to our context structure
88 * @state: the state to put link into
89 *
90 * Caller should take care of locking. This function will
91 * return 0 on success or -ETIMEDOUT.
92 */
93int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
94{
95 int retries = 10000;
96 u32 reg;
97
98 /*
99 * Wait until device controller is ready. Only applies to 1.94a and
100 * later RTL.
101 */
102 if (dwc->revision >= DWC3_REVISION_194A) {
103 while (--retries) {
104 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
105 if (reg & DWC3_DSTS_DCNRD)
106 udelay(5);
107 else
108 break;
109 }
110
111 if (retries <= 0)
112 return -ETIMEDOUT;
113 }
114
115 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
116 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
117
118 /* set requested state */
119 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
120 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
121
122 /*
123 * The following code is racy when called from dwc3_gadget_wakeup,
124 * and is not needed, at least on newer versions
125 */
126 if (dwc->revision >= DWC3_REVISION_194A)
127 return 0;
128
129 /* wait for a change in DSTS */
130 retries = 10000;
131 while (--retries) {
132 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
133
134 if (DWC3_DSTS_USBLNKST(reg) == state)
135 return 0;
136
137 udelay(5);
138 }
139
140 dev_vdbg(dwc->dev, "link state change request timed out\n");
141
142 return -ETIMEDOUT;
143}
144
/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the number of endpoints enabled in the coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 *    (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
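/*
 * Illustrative example (not from the original source): assuming a 64-bit
 * master bus (mdwidth = 8 bytes) and a SuperSpeed bulk endpoint with a
 * 1024-byte wMaxPacketSize, the per-endpoint allocation computed below is
 *
 *	tmp       = 3 * (1024 + 8) + 8 = 3104 bytes
 *	fifo_size = DIV_ROUND_UP(3104, 8) = 388 RAM1 words
 *
 * i.e. room for three full packets plus one bus word of overhead.
 */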
166int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
167{
168 int last_fifo_depth = 0;
169 int ram1_depth;
170 int fifo_size;
171 int mdwidth;
172 int num;
173
174 if (!dwc->needs_fifo_resize)
175 return 0;
176
177 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
178 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
179
180 /* MDWIDTH is represented in bits, we need it in bytes */
181 mdwidth >>= 3;
182
183 /*
184 * FIXME For now we will only allocate 1 wMaxPacketSize space
185 * for each enabled endpoint, later patches will come to
186 * improve this algorithm so that we better use the internal
187 * FIFO space
188 */
189 for (num = 0; num < dwc->num_in_eps; num++) {
190 /* bit0 indicates direction; 1 means IN ep */
191 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
192 int mult = 1;
193 int tmp;
194
195 if (!(dep->flags & DWC3_EP_ENABLED))
196 continue;
197
198 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
199 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
200 mult = 3;
201
202 /*
203 * REVISIT: the following assumes we will always have enough
204 * space available on the FIFO RAM for all possible use cases.
205 * Make sure that's true somehow and change FIFO allocation
206 * accordingly.
207 *
208 * If we have Bulk or Isochronous endpoints, we want
209 * them to be able to be very, very fast. So we're giving
210 * those endpoints a fifo_size which is enough for 3 full
211 * packets
212 */
213 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
214 tmp += mdwidth;
215
216 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
217
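		/*
		 * GTXFIFOSIZn packs the FIFO start address in the upper 16
		 * bits and the FIFO depth (in RAM1 words) in the lower 16
		 * bits, hence the shift below and the "Addr/Size" split in
		 * the debug print.
		 */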
218 fifo_size |= (last_fifo_depth << 16);
219
220 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
221 dep->name, last_fifo_depth, fifo_size & 0xffff);
222
223 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
224
225 last_fifo_depth += (fifo_size & 0xffff);
226 }
227
228 return 0;
229}
230
231void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
232 int status)
233{
234 struct dwc3 *dwc = dep->dwc;
235 int i;
236
237 if (req->queued) {
238 i = 0;
239 do {
240 dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
					DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
251 req->queued = false;
252 }
253 list_del(&req->list);
254 req->trb = NULL;
255
256 if (req->request.status == -EINPROGRESS)
257 req->request.status = status;
258
259 if (dwc->ep0_bounced && dep->number == 0)
260 dwc->ep0_bounced = false;
261 else
262 usb_gadget_unmap_request(&dwc->gadget, &req->request,
263 req->direction);
264
265 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
266 req, dep->name, req->request.actual,
267 req->request.length, status);
268 trace_dwc3_gadget_giveback(req);
269
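	/*
	 * Drop the controller lock around the completion callback; the
	 * gadget driver's ->complete() handler may queue new requests and
	 * re-enter this driver.
	 */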
270 spin_unlock(&dwc->lock);
271 usb_gadget_giveback_request(&dep->endpoint, &req->request);
272 spin_lock(&dwc->lock);
273}
274
275int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
276{
277 u32 timeout = 500;
278 u32 reg;
279
280 trace_dwc3_gadget_generic_cmd(cmd, param);
281
282 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
283 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
284
285 do {
286 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
287 if (!(reg & DWC3_DGCMD_CMDACT)) {
288 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
289 DWC3_DGCMD_STATUS(reg));
290 return 0;
291 }
292
293 /*
294 * We can't sleep here, because it's also called from
295 * interrupt context.
296 */
297 timeout--;
298 if (!timeout)
299 return -ETIMEDOUT;
300 udelay(1);
301 } while (1);
302}
303
304int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
305 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
306{
307 struct dwc3_ep *dep = dwc->eps[ep];
308 u32 timeout = 500;
309 u32 reg;
310
311 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
312
313 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
314 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
315 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
316
317 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
318 do {
319 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
320 if (!(reg & DWC3_DEPCMD_CMDACT)) {
321 dev_vdbg(dwc->dev, "Command Complete --> %d\n",
322 DWC3_DEPCMD_STATUS(reg));
323 return 0;
324 }
325
326 /*
327 * We can't sleep here, because it is also called from
328 * interrupt context.
329 */
330 timeout--;
331 if (!timeout)
332 return -ETIMEDOUT;
333
334 udelay(1);
335 } while (1);
336}
337
338static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
339 struct dwc3_trb *trb)
340{
341 u32 offset = (char *) trb - (char *) dep->trb_pool;
342
343 return dep->trb_pool_dma + offset;
344}
345
346static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
347{
348 struct dwc3 *dwc = dep->dwc;
349
350 if (dep->trb_pool)
351 return 0;
352
353 if (dep->number == 0 || dep->number == 1)
354 return 0;
355
356 dep->trb_pool = dma_alloc_coherent(dwc->dev,
357 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
358 &dep->trb_pool_dma, GFP_KERNEL);
359 if (!dep->trb_pool) {
360 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
361 dep->name);
362 return -ENOMEM;
363 }
364
365 return 0;
366}
367
368static void dwc3_free_trb_pool(struct dwc3_ep *dep)
369{
370 struct dwc3 *dwc = dep->dwc;
371
372 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
373 dep->trb_pool, dep->trb_pool_dma);
374
375 dep->trb_pool = NULL;
376 dep->trb_pool_dma = 0;
377}
378
379static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
380{
381 struct dwc3_gadget_ep_cmd_params params;
382 u32 cmd;
383
384 memset(&params, 0x00, sizeof(params));
385
386 if (dep->number != 1) {
387 cmd = DWC3_DEPCMD_DEPSTARTCFG;
388 /* XferRscIdx == 0 for ep0 and 2 for the remaining */
389 if (dep->number > 1) {
390 if (dwc->start_config_issued)
391 return 0;
392 dwc->start_config_issued = true;
393 cmd |= DWC3_DEPCMD_PARAM(2);
394 }
395
396 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
397 }
398
399 return 0;
400}
401
402static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
403 const struct usb_endpoint_descriptor *desc,
404 const struct usb_ss_ep_comp_descriptor *comp_desc,
405 bool ignore, bool restore)
406{
407 struct dwc3_gadget_ep_cmd_params params;
408
409 memset(&params, 0x00, sizeof(params));
410
411 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
412 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
413
414 /* Burst size is only needed in SuperSpeed mode */
415 if (dwc->gadget.speed == USB_SPEED_SUPER) {
416 u32 burst = dep->endpoint.maxburst - 1;
417
418 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
419 }
420
421 if (ignore)
422 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
423
424 if (restore) {
425 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
426 params.param2 |= dep->saved_state;
427 }
428
429 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
430 | DWC3_DEPCFG_XFER_NOT_READY_EN;
431
432 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
433 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
434 | DWC3_DEPCFG_STREAM_EVENT_EN;
435 dep->stream_capable = true;
436 }
437
438 if (!usb_endpoint_xfer_control(desc))
439 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
440
	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is physical endpoint 3.
	 */
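	/*
	 * Illustrative mapping (assumption based on the 1:1 scheme above):
	 * physical number = (logical EP number << 1) | direction, so
	 * USB address 0x81 (IN 1) -> 3 and 0x02 (OUT 2) -> 4.
	 */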
447 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
448
449 /*
450 * We must use the lower 16 TX FIFOs even though
451 * HW might have more
452 */
453 if (dep->direction)
454 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
455
456 if (desc->bInterval) {
457 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
458 dep->interval = 1 << (desc->bInterval - 1);
459 }
460
461 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
462 DWC3_DEPCMD_SETEPCONFIG, &params);
463}
464
465static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
466{
467 struct dwc3_gadget_ep_cmd_params params;
468
469 memset(&params, 0x00, sizeof(params));
470
471 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
472
473 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
474 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
475}
476
477/**
478 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
479 * @dep: endpoint to be initialized
480 * @desc: USB Endpoint Descriptor
481 *
482 * Caller should take care of locking
483 */
484static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
485 const struct usb_endpoint_descriptor *desc,
486 const struct usb_ss_ep_comp_descriptor *comp_desc,
487 bool ignore, bool restore)
488{
489 struct dwc3 *dwc = dep->dwc;
490 u32 reg;
491 int ret;
492
493 dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
494
495 if (!(dep->flags & DWC3_EP_ENABLED)) {
496 ret = dwc3_gadget_start_config(dwc, dep);
497 if (ret)
498 return ret;
499 }
500
501 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
502 restore);
503 if (ret)
504 return ret;
505
506 if (!(dep->flags & DWC3_EP_ENABLED)) {
507 struct dwc3_trb *trb_st_hw;
508 struct dwc3_trb *trb_link;
509
510 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
511 if (ret)
512 return ret;
513
514 dep->endpoint.desc = desc;
515 dep->comp_desc = comp_desc;
516 dep->type = usb_endpoint_type(desc);
517 dep->flags |= DWC3_EP_ENABLED;
518
519 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
520 reg |= DWC3_DALEPENA_EP(dep->number);
521 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
522
523 if (!usb_endpoint_xfer_isoc(desc))
524 return 0;
525
526 /* Link TRB for ISOC. The HWO bit is never reset */
527 trb_st_hw = &dep->trb_pool[0];
528
529 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
530 memset(trb_link, 0, sizeof(*trb_link));
531
532 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
533 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
534 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
535 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
536 }
537
538 return 0;
539}
540
541static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
542static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
543{
544 struct dwc3_request *req;
545
546 if (!list_empty(&dep->req_queued)) {
547 dwc3_stop_active_transfer(dwc, dep->number, true);
548
		/* giveback all requests to the gadget driver */
550 while (!list_empty(&dep->req_queued)) {
551 req = next_request(&dep->req_queued);
552
553 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
554 }
555 }
556
557 while (!list_empty(&dep->request_list)) {
558 req = next_request(&dep->request_list);
559
560 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
561 }
562}
563
/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
572static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
573{
574 struct dwc3 *dwc = dep->dwc;
575 u32 reg;
576
577 dwc3_remove_requests(dwc, dep);
578
579 /* make sure HW endpoint isn't stalled */
580 if (dep->flags & DWC3_EP_STALL)
581 __dwc3_gadget_ep_set_halt(dep, 0, false);
582
583 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
584 reg &= ~DWC3_DALEPENA_EP(dep->number);
585 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
586
587 dep->stream_capable = false;
588 dep->endpoint.desc = NULL;
589 dep->comp_desc = NULL;
590 dep->type = 0;
591 dep->flags = 0;
592
593 return 0;
594}
595
596/* -------------------------------------------------------------------------- */
597
598static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
599 const struct usb_endpoint_descriptor *desc)
600{
601 return -EINVAL;
602}
603
604static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
605{
606 return -EINVAL;
607}
608
609/* -------------------------------------------------------------------------- */
610
611static int dwc3_gadget_ep_enable(struct usb_ep *ep,
612 const struct usb_endpoint_descriptor *desc)
613{
614 struct dwc3_ep *dep;
615 struct dwc3 *dwc;
616 unsigned long flags;
617 int ret;
618
619 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
620 pr_debug("dwc3: invalid parameters\n");
621 return -EINVAL;
622 }
623
624 if (!desc->wMaxPacketSize) {
625 pr_debug("dwc3: missing wMaxPacketSize\n");
626 return -EINVAL;
627 }
628
629 dep = to_dwc3_ep(ep);
630 dwc = dep->dwc;
631
632 if (dep->flags & DWC3_EP_ENABLED) {
633 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
634 dep->name);
635 return 0;
636 }
637
638 switch (usb_endpoint_type(desc)) {
639 case USB_ENDPOINT_XFER_CONTROL:
640 strlcat(dep->name, "-control", sizeof(dep->name));
641 break;
642 case USB_ENDPOINT_XFER_ISOC:
643 strlcat(dep->name, "-isoc", sizeof(dep->name));
644 break;
645 case USB_ENDPOINT_XFER_BULK:
646 strlcat(dep->name, "-bulk", sizeof(dep->name));
647 break;
648 case USB_ENDPOINT_XFER_INT:
649 strlcat(dep->name, "-int", sizeof(dep->name));
650 break;
651 default:
652 dev_err(dwc->dev, "invalid endpoint transfer type\n");
653 }
654
655 spin_lock_irqsave(&dwc->lock, flags);
656 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
657 spin_unlock_irqrestore(&dwc->lock, flags);
658
659 return ret;
660}
661
662static int dwc3_gadget_ep_disable(struct usb_ep *ep)
663{
664 struct dwc3_ep *dep;
665 struct dwc3 *dwc;
666 unsigned long flags;
667 int ret;
668
669 if (!ep) {
670 pr_debug("dwc3: invalid parameters\n");
671 return -EINVAL;
672 }
673
674 dep = to_dwc3_ep(ep);
675 dwc = dep->dwc;
676
677 if (!(dep->flags & DWC3_EP_ENABLED)) {
678 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
679 dep->name);
680 return 0;
681 }
682
683 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
684 dep->number >> 1,
685 (dep->number & 1) ? "in" : "out");
686
687 spin_lock_irqsave(&dwc->lock, flags);
688 ret = __dwc3_gadget_ep_disable(dep);
689 spin_unlock_irqrestore(&dwc->lock, flags);
690
691 return ret;
692}
693
694static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
695 gfp_t gfp_flags)
696{
697 struct dwc3_request *req;
698 struct dwc3_ep *dep = to_dwc3_ep(ep);
699
700 req = kzalloc(sizeof(*req), gfp_flags);
701 if (!req)
702 return NULL;
703
704 req->epnum = dep->number;
705 req->dep = dep;
706
707 trace_dwc3_alloc_request(req);
708
709 return &req->request;
710}
711
712static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
713 struct usb_request *request)
714{
715 struct dwc3_request *req = to_dwc3_request(request);
716
717 trace_dwc3_free_request(req);
718 kfree(req);
719}
720
721/**
722 * dwc3_prepare_one_trb - setup one TRB from one request
723 * @dep: endpoint for which this request is prepared
724 * @req: dwc3_request pointer
725 */
726static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
727 struct dwc3_request *req, dma_addr_t dma,
728 unsigned length, unsigned last, unsigned chain, unsigned node)
729{
730 struct dwc3 *dwc = dep->dwc;
731 struct dwc3_trb *trb;
732
733 dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
734 dep->name, req, (unsigned long long) dma,
735 length, last ? " last" : "",
736 chain ? " chain" : "");
737
738
739 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
740
741 if (!req->trb) {
742 dwc3_gadget_move_request_queued(req);
743 req->trb = trb;
744 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
745 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
746 }
747
748 dep->free_slot++;
749 /* Skip the LINK-TRB on ISOC */
750 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
751 usb_endpoint_xfer_isoc(dep->endpoint.desc))
752 dep->free_slot++;
753
754 trb->size = DWC3_TRB_SIZE_LENGTH(length);
755 trb->bpl = lower_32_bits(dma);
756 trb->bph = upper_32_bits(dma);
757
758 switch (usb_endpoint_type(dep->endpoint.desc)) {
759 case USB_ENDPOINT_XFER_CONTROL:
760 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
761 break;
762
763 case USB_ENDPOINT_XFER_ISOC:
764 if (!node)
765 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
766 else
767 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
768 break;
769
770 case USB_ENDPOINT_XFER_BULK:
771 case USB_ENDPOINT_XFER_INT:
772 trb->ctrl = DWC3_TRBCTL_NORMAL;
773 break;
774 default:
775 /*
776 * This is only possible with faulty memory because we
777 * checked it already :)
778 */
779 BUG();
780 }
781
782 if (!req->request.no_interrupt && !chain)
783 trb->ctrl |= DWC3_TRB_CTRL_IOC;
784
785 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
786 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
787 trb->ctrl |= DWC3_TRB_CTRL_CSP;
788 } else if (last) {
789 trb->ctrl |= DWC3_TRB_CTRL_LST;
790 }
791
792 if (chain)
793 trb->ctrl |= DWC3_TRB_CTRL_CHN;
794
795 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
796 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
797
798 trb->ctrl |= DWC3_TRB_CTRL_HWO;
799
800 trace_dwc3_prepare_trb(dep, trb);
801}
802
803/*
804 * dwc3_prepare_trbs - setup TRBs from requests
805 * @dep: endpoint for which requests are being prepared
806 * @starting: true if the endpoint is idle and no requests are queued.
807 *
808 * The function goes through the requests list and sets up TRBs for the
809 * transfers. The function returns once there are no more TRBs available or
810 * it runs out of requests.
811 */
812static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
813{
814 struct dwc3_request *req, *n;
815 u32 trbs_left;
816 u32 max;
817 unsigned int last_one = 0;
818
819 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
820
821 /* the first request must not be queued */
822 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
823
824 /* Can't wrap around on a non-isoc EP since there's no link TRB */
825 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
826 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
827 if (trbs_left > max)
828 trbs_left = max;
829 }
830
	/*
	 * If the busy and free slots are equal then the ring is either full
	 * or empty. If we are starting to process requests then we are
	 * empty. Otherwise we are full and don't do anything.
	 */
836 if (!trbs_left) {
837 if (!starting)
838 return;
839 trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use a ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * an IOC bit every TRB_NUM/4. We try to avoid having an
		 * interrupt after the first request so we start at slot 1
		 * and have 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
851 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
852 dep->busy_slot = 1;
853 dep->free_slot = 1;
854 } else {
855 dep->busy_slot = 0;
856 dep->free_slot = 0;
857 }
858 }
859
860 /* The last TRB is a link TRB, not used for xfer */
861 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
862 return;
863
864 list_for_each_entry_safe(req, n, &dep->request_list, list) {
865 unsigned length;
866 dma_addr_t dma;
867 last_one = false;
868
869 if (req->request.num_mapped_sgs > 0) {
870 struct usb_request *request = &req->request;
871 struct scatterlist *sg = request->sg;
872 struct scatterlist *s;
873 int i;
874
875 for_each_sg(sg, s, request->num_mapped_sgs, i) {
876 unsigned chain = true;
877
878 length = sg_dma_len(s);
879 dma = sg_dma_address(s);
880
881 if (i == (request->num_mapped_sgs - 1) ||
882 sg_is_last(s)) {
883 if (list_is_last(&req->list,
884 &dep->request_list))
885 last_one = true;
886 chain = false;
887 }
888
889 trbs_left--;
890 if (!trbs_left)
891 last_one = true;
892
893 if (last_one)
894 chain = false;
895
896 dwc3_prepare_one_trb(dep, req, dma, length,
897 last_one, chain, i);
898
899 if (last_one)
900 break;
901 }
902 } else {
903 dma = req->request.dma;
904 length = req->request.length;
905 trbs_left--;
906
907 if (!trbs_left)
908 last_one = 1;
909
910 /* Is this the last request? */
911 if (list_is_last(&req->list, &dep->request_list))
912 last_one = 1;
913
914 dwc3_prepare_one_trb(dep, req, dma, length,
915 last_one, false, 0);
916
917 if (last_one)
918 break;
919 }
920 }
921}
922
923static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
924 int start_new)
925{
926 struct dwc3_gadget_ep_cmd_params params;
927 struct dwc3_request *req;
928 struct dwc3 *dwc = dep->dwc;
929 int ret;
930 u32 cmd;
931
932 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
933 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
934 return -EBUSY;
935 }
936 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
937
938 /*
939 * If we are getting here after a short-out-packet we don't enqueue any
940 * new requests as we try to set the IOC bit only on the last request.
941 */
942 if (start_new) {
943 if (list_empty(&dep->req_queued))
944 dwc3_prepare_trbs(dep, start_new);
945
946 /* req points to the first request which will be sent */
947 req = next_request(&dep->req_queued);
948 } else {
949 dwc3_prepare_trbs(dep, start_new);
950
951 /*
952 * req points to the first request where HWO changed from 0 to 1
953 */
954 req = next_request(&dep->req_queued);
955 }
956 if (!req) {
957 dep->flags |= DWC3_EP_PENDING_REQUEST;
958 return 0;
959 }
960
961 memset(&params, 0, sizeof(params));
962
963 if (start_new) {
964 params.param0 = upper_32_bits(req->trb_dma);
965 params.param1 = lower_32_bits(req->trb_dma);
966 cmd = DWC3_DEPCMD_STARTTRANSFER;
967 } else {
968 cmd = DWC3_DEPCMD_UPDATETRANSFER;
969 }
970
971 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
972 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
973 if (ret < 0) {
974 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
975
976 /*
977 * FIXME we need to iterate over the list of requests
978 * here and stop, unmap, free and del each of the linked
979 * requests instead of what we do now.
980 */
981 usb_gadget_unmap_request(&dwc->gadget, &req->request,
982 req->direction);
983 list_del(&req->list);
984 return ret;
985 }
986
987 dep->flags |= DWC3_EP_BUSY;
988
989 if (start_new) {
990 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
991 dep->number);
992 WARN_ON_ONCE(!dep->resource_index);
993 }
994
995 return 0;
996}
997
998static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
999 struct dwc3_ep *dep, u32 cur_uf)
1000{
1001 u32 uf;
1002
1003 if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s ran out of requests\n",
				dep->name);
1006 dep->flags |= DWC3_EP_PENDING_REQUEST;
1007 return;
1008 }
1009
1010 /* 4 micro frames in the future */
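	/*
	 * Illustrative example: for an isochronous endpoint with
	 * bInterval = 4, dep->interval is 1 << (4 - 1) = 8, so the start
	 * is requested 8 * 4 = 32 microframes after cur_uf.
	 */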
1011 uf = cur_uf + dep->interval * 4;
1012
1013 __dwc3_gadget_kick_transfer(dep, uf, 1);
1014}
1015
1016static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1017 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1018{
1019 u32 cur_uf, mask;
1020
1021 mask = ~(dep->interval - 1);
1022 cur_uf = event->parameters & mask;
1023
1024 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1025}
1026
1027static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1028{
1029 struct dwc3 *dwc = dep->dwc;
1030 int ret;
1031
1032 req->request.actual = 0;
1033 req->request.status = -EINPROGRESS;
1034 req->direction = dep->direction;
1035 req->epnum = dep->number;
1036
1037 /*
1038 * We only add to our list of requests now and
1039 * start consuming the list once we get XferNotReady
1040 * IRQ.
1041 *
1042 * That way, we avoid doing anything that we don't need
1043 * to do now and defer it until the point we receive a
1044 * particular token from the Host side.
1045 *
1046 * This will also avoid Host cancelling URBs due to too
1047 * many NAKs.
1048 */
1049 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1050 dep->direction);
1051 if (ret)
1052 return ret;
1053
1054 list_add_tail(&req->list, &dep->request_list);
1055
1056 /*
1057 * There are a few special cases:
1058 *
1059 * 1. XferNotReady with empty list of requests. We need to kick the
1060 * transfer here in that situation, otherwise we will be NAKing
1061 * forever. If we get XferNotReady before gadget driver has a
1062 * chance to queue a request, we will ACK the IRQ but won't be
1063 * able to receive the data until the next request is queued.
1064 * The following code is handling exactly that.
1065 *
1066 */
1067 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If XferNotReady has already elapsed and this is an
		 * isochronous transfer, then issue END TRANSFER, so that
		 * we can receive XferNotReady again and have a notion of
		 * the current microframe.
		 */
1074 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1075 if (list_empty(&dep->req_queued)) {
1076 dwc3_stop_active_transfer(dwc, dep->number, true);
1077 dep->flags = DWC3_EP_ENABLED;
1078 }
1079 return 0;
1080 }
1081
1082 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1083 if (ret && ret != -EBUSY)
1084 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1085 dep->name);
1086 return ret;
1087 }
1088
1089 /*
1090 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1091 * kick the transfer here after queuing a request, otherwise the
1092 * core may not see the modified TRB(s).
1093 */
1094 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1095 (dep->flags & DWC3_EP_BUSY) &&
1096 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1097 WARN_ON_ONCE(!dep->resource_index);
1098 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1099 false);
1100 if (ret && ret != -EBUSY)
1101 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1102 dep->name);
1103 return ret;
1104 }
1105
	/*
	 * 3. Stream Capable Bulk Endpoints. We need to start the transfer
	 * right away, otherwise host will not know we have streams to be
	 * handled.
	 */
1111 if (dep->stream_capable) {
1112 int ret;
1113
1114 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1115 if (ret && ret != -EBUSY) {
1116 struct dwc3 *dwc = dep->dwc;
1117
1118 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1119 dep->name);
1120 }
1121 }
1122
1123 return 0;
1124}
1125
1126static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1127 gfp_t gfp_flags)
1128{
1129 struct dwc3_request *req = to_dwc3_request(request);
1130 struct dwc3_ep *dep = to_dwc3_ep(ep);
1131 struct dwc3 *dwc = dep->dwc;
1132
1133 unsigned long flags;
1134
1135 int ret;
1136
1137 spin_lock_irqsave(&dwc->lock, flags);
1138 if (!dep->endpoint.desc) {
1139 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1140 request, ep->name);
1141 ret = -ESHUTDOWN;
1142 goto out;
1143 }
1144
1145 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1146 request, req->dep->name)) {
1147 ret = -EINVAL;
1148 goto out;
1149 }
1150
	dev_vdbg(dwc->dev, "queueing request %p to %s length %d\n",
			request, ep->name, request->length);
1153 trace_dwc3_ep_queue(req);
1154
1155 ret = __dwc3_gadget_ep_queue(dep, req);
1156
1157out:
1158 spin_unlock_irqrestore(&dwc->lock, flags);
1159
1160 return ret;
1161}
1162
1163static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1164 struct usb_request *request)
1165{
1166 struct dwc3_request *req = to_dwc3_request(request);
1167 struct dwc3_request *r = NULL;
1168
1169 struct dwc3_ep *dep = to_dwc3_ep(ep);
1170 struct dwc3 *dwc = dep->dwc;
1171
1172 unsigned long flags;
1173 int ret = 0;
1174
1175 trace_dwc3_ep_dequeue(req);
1176
1177 spin_lock_irqsave(&dwc->lock, flags);
1178
1179 list_for_each_entry(r, &dep->request_list, list) {
1180 if (r == req)
1181 break;
1182 }
1183
1184 if (r != req) {
1185 list_for_each_entry(r, &dep->req_queued, list) {
1186 if (r == req)
1187 break;
1188 }
1189 if (r == req) {
1190 /* wait until it is processed */
1191 dwc3_stop_active_transfer(dwc, dep->number, true);
1192 goto out1;
1193 }
1194 dev_err(dwc->dev, "request %p was not queued to %s\n",
1195 request, ep->name);
1196 ret = -EINVAL;
1197 goto out0;
1198 }
1199
1200out1:
1201 /* giveback the request */
1202 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1203
1204out0:
1205 spin_unlock_irqrestore(&dwc->lock, flags);
1206
1207 return ret;
1208}
1209
1210int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1211{
1212 struct dwc3_gadget_ep_cmd_params params;
1213 struct dwc3 *dwc = dep->dwc;
1214 int ret;
1215
1216 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1217 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1218 return -EINVAL;
1219 }
1220
1221 memset(&params, 0x00, sizeof(params));
1222
1223 if (value) {
1224 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1225 (!list_empty(&dep->req_queued) ||
1226 !list_empty(&dep->request_list)))) {
1227 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1228 dep->name);
1229 return -EAGAIN;
1230 }
1231
1232 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1233 DWC3_DEPCMD_SETSTALL, &params);
1234 if (ret)
1235 dev_err(dwc->dev, "failed to set STALL on %s\n",
1236 dep->name);
1237 else
1238 dep->flags |= DWC3_EP_STALL;
1239 } else {
1240 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1241 DWC3_DEPCMD_CLEARSTALL, &params);
1242 if (ret)
1243 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1244 dep->name);
1245 else
1246 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1247 }
1248
1249 return ret;
1250}
1251
1252static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1253{
1254 struct dwc3_ep *dep = to_dwc3_ep(ep);
1255 struct dwc3 *dwc = dep->dwc;
1256
1257 unsigned long flags;
1258
1259 int ret;
1260
1261 spin_lock_irqsave(&dwc->lock, flags);
1262 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1263 spin_unlock_irqrestore(&dwc->lock, flags);
1264
1265 return ret;
1266}
1267
1268static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1269{
1270 struct dwc3_ep *dep = to_dwc3_ep(ep);
1271 struct dwc3 *dwc = dep->dwc;
1272 unsigned long flags;
1273 int ret;
1274
1275 spin_lock_irqsave(&dwc->lock, flags);
1276 dep->flags |= DWC3_EP_WEDGE;
1277
1278 if (dep->number == 0 || dep->number == 1)
1279 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1280 else
1281 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1282 spin_unlock_irqrestore(&dwc->lock, flags);
1283
1284 return ret;
1285}
1286
1287/* -------------------------------------------------------------------------- */
1288
1289static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1290 .bLength = USB_DT_ENDPOINT_SIZE,
1291 .bDescriptorType = USB_DT_ENDPOINT,
1292 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1293};
1294
1295static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1296 .enable = dwc3_gadget_ep0_enable,
1297 .disable = dwc3_gadget_ep0_disable,
1298 .alloc_request = dwc3_gadget_ep_alloc_request,
1299 .free_request = dwc3_gadget_ep_free_request,
1300 .queue = dwc3_gadget_ep0_queue,
1301 .dequeue = dwc3_gadget_ep_dequeue,
1302 .set_halt = dwc3_gadget_ep0_set_halt,
1303 .set_wedge = dwc3_gadget_ep_set_wedge,
1304};
1305
1306static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1307 .enable = dwc3_gadget_ep_enable,
1308 .disable = dwc3_gadget_ep_disable,
1309 .alloc_request = dwc3_gadget_ep_alloc_request,
1310 .free_request = dwc3_gadget_ep_free_request,
1311 .queue = dwc3_gadget_ep_queue,
1312 .dequeue = dwc3_gadget_ep_dequeue,
1313 .set_halt = dwc3_gadget_ep_set_halt,
1314 .set_wedge = dwc3_gadget_ep_set_wedge,
1315};
1316
1317/* -------------------------------------------------------------------------- */
1318
1319static int dwc3_gadget_get_frame(struct usb_gadget *g)
1320{
1321 struct dwc3 *dwc = gadget_to_dwc(g);
1322 u32 reg;
1323
1324 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1325 return DWC3_DSTS_SOFFN(reg);
1326}
1327
1328static int dwc3_gadget_wakeup(struct usb_gadget *g)
1329{
1330 struct dwc3 *dwc = gadget_to_dwc(g);
1331
1332 unsigned long timeout;
1333 unsigned long flags;
1334
1335 u32 reg;
1336
1337 int ret = 0;
1338
1339 u8 link_state;
1340 u8 speed;
1341
1342 spin_lock_irqsave(&dwc->lock, flags);
1343
	/*
	 * According to the Databook, a Remote Wakeup request should
	 * be issued only when the device is in early suspend state.
	 *
	 * We can check that via USB Link State bits in DSTS register.
	 */
1350 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1351
1352 speed = reg & DWC3_DSTS_CONNECTSPD;
1353 if (speed == DWC3_DSTS_SUPERSPEED) {
1354 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1355 ret = -EINVAL;
1356 goto out;
1357 }
1358
1359 link_state = DWC3_DSTS_USBLNKST(reg);
1360
1361 switch (link_state) {
1362 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1363 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1364 break;
1365 default:
1366 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1367 link_state);
1368 ret = -EINVAL;
1369 goto out;
1370 }
1371
1372 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1373 if (ret < 0) {
1374 dev_err(dwc->dev, "failed to put link in Recovery\n");
1375 goto out;
1376 }
1377
1378 /* Recent versions do this automatically */
1379 if (dwc->revision < DWC3_REVISION_194A) {
1380 /* write zeroes to Link Change Request */
1381 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1382 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1383 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1384 }
1385
1386 /* poll until Link State changes to ON */
1387 timeout = jiffies + msecs_to_jiffies(100);
1388
1389 while (!time_after(jiffies, timeout)) {
1390 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1391
1392 /* in HS, means ON */
1393 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1394 break;
1395 }
1396
1397 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1398 dev_err(dwc->dev, "failed to send remote wakeup\n");
1399 ret = -EINVAL;
1400 }
1401
1402out:
1403 spin_unlock_irqrestore(&dwc->lock, flags);
1404
1405 return ret;
1406}
1407
1408static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1409 int is_selfpowered)
1410{
1411 struct dwc3 *dwc = gadget_to_dwc(g);
1412 unsigned long flags;
1413
1414 spin_lock_irqsave(&dwc->lock, flags);
1415 dwc->is_selfpowered = !!is_selfpowered;
1416 spin_unlock_irqrestore(&dwc->lock, flags);
1417
1418 return 0;
1419}
1420
1421static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1422{
1423 u32 reg;
1424 u32 timeout = 500;
1425
1426 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1427 if (is_on) {
1428 if (dwc->revision <= DWC3_REVISION_187A) {
1429 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1430 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1431 }
1432
1433 if (dwc->revision >= DWC3_REVISION_194A)
1434 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1435 reg |= DWC3_DCTL_RUN_STOP;
1436
1437 if (dwc->has_hibernation)
1438 reg |= DWC3_DCTL_KEEP_CONNECT;
1439
1440 dwc->pullups_connected = true;
1441 } else {
1442 reg &= ~DWC3_DCTL_RUN_STOP;
1443
1444 if (dwc->has_hibernation && !suspend)
1445 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1446
1447 dwc->pullups_connected = false;
1448 }
1449
1450 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1451
1452 do {
1453 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1454 if (is_on) {
1455 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1456 break;
1457 } else {
1458 if (reg & DWC3_DSTS_DEVCTRLHLT)
1459 break;
1460 }
1461 timeout--;
1462 if (!timeout)
1463 return -ETIMEDOUT;
1464 udelay(1);
1465 } while (1);
1466
1467 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1468 dwc->gadget_driver
1469 ? dwc->gadget_driver->function : "no-function",
1470 is_on ? "connect" : "disconnect");
1471
1472 return 0;
1473}
1474
1475static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1476{
1477 struct dwc3 *dwc = gadget_to_dwc(g);
1478 unsigned long flags;
1479 int ret;
1480
1481 is_on = !!is_on;
1482
1483 spin_lock_irqsave(&dwc->lock, flags);
1484 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1485 spin_unlock_irqrestore(&dwc->lock, flags);
1486
1487 return ret;
1488}
1489
1490static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1491{
1492 u32 reg;
1493
1494 /* Enable all but Start and End of Frame IRQs */
1495 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1496 DWC3_DEVTEN_EVNTOVERFLOWEN |
1497 DWC3_DEVTEN_CMDCMPLTEN |
1498 DWC3_DEVTEN_ERRTICERREN |
1499 DWC3_DEVTEN_WKUPEVTEN |
1500 DWC3_DEVTEN_ULSTCNGEN |
1501 DWC3_DEVTEN_CONNECTDONEEN |
1502 DWC3_DEVTEN_USBRSTEN |
1503 DWC3_DEVTEN_DISCONNEVTEN);
1504
1505 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1506}
1507
1508static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1509{
1510 /* mask all interrupts */
1511 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1512}
1513
1514static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1515static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1516
1517static int dwc3_gadget_start(struct usb_gadget *g,
1518 struct usb_gadget_driver *driver)
1519{
1520 struct dwc3 *dwc = gadget_to_dwc(g);
1521 struct dwc3_ep *dep;
1522 unsigned long flags;
1523 int ret = 0;
1524 int irq;
1525 u32 reg;
1526
1527 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1528 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1529 IRQF_SHARED, "dwc3", dwc);
1530 if (ret) {
1531 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1532 irq, ret);
1533 goto err0;
1534 }
1535
1536 spin_lock_irqsave(&dwc->lock, flags);
1537
1538 if (dwc->gadget_driver) {
1539 dev_err(dwc->dev, "%s is already bound to %s\n",
1540 dwc->gadget.name,
1541 dwc->gadget_driver->driver.name);
1542 ret = -EBUSY;
1543 goto err1;
1544 }
1545
1546 dwc->gadget_driver = driver;
1547
1548 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1549 reg &= ~(DWC3_DCFG_SPEED_MASK);
1550
	/**
	 * WORKAROUND: DWC3 revisions < 2.20a have an issue
	 * which would cause metastability state on Run/Stop
	 * bit if we try to force the IP to USB2-only mode.
	 *
	 * Because of that, we cannot configure the IP to any
	 * speed other than SuperSpeed.
	 *
	 * Refers to:
	 *
	 * STAR#9000525659: Clock Domain Crossing on DCTL in
	 * USB 2.0 Mode
	 */
1564 if (dwc->revision < DWC3_REVISION_220A) {
1565 reg |= DWC3_DCFG_SUPERSPEED;
1566 } else {
1567 switch (dwc->maximum_speed) {
1568 case USB_SPEED_LOW:
1569 reg |= DWC3_DSTS_LOWSPEED;
1570 break;
1571 case USB_SPEED_FULL:
1572 reg |= DWC3_DSTS_FULLSPEED1;
1573 break;
1574 case USB_SPEED_HIGH:
1575 reg |= DWC3_DSTS_HIGHSPEED;
1576 break;
1577 case USB_SPEED_SUPER: /* FALLTHROUGH */
		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
1579 default:
1580 reg |= DWC3_DSTS_SUPERSPEED;
1581 }
1582 }
1583 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1584
1585 dwc->start_config_issued = false;
1586
1587 /* Start with SuperSpeed Default */
1588 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1589
1590 dep = dwc->eps[0];
1591 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1592 false);
1593 if (ret) {
1594 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1595 goto err2;
1596 }
1597
1598 dep = dwc->eps[1];
1599 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1600 false);
1601 if (ret) {
1602 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1603 goto err3;
1604 }
1605
1606 /* begin to receive SETUP packets */
1607 dwc->ep0state = EP0_SETUP_PHASE;
1608 dwc3_ep0_out_start(dwc);
1609
1610 dwc3_gadget_enable_irq(dwc);
1611
1612 spin_unlock_irqrestore(&dwc->lock, flags);
1613
1614 return 0;
1615
1616err3:
1617 __dwc3_gadget_ep_disable(dwc->eps[0]);
1618
1619err2:
1620 dwc->gadget_driver = NULL;
1621
1622err1:
1623 spin_unlock_irqrestore(&dwc->lock, flags);
1624
1625 free_irq(irq, dwc);
1626
1627err0:
1628 return ret;
1629}
1630
1631static int dwc3_gadget_stop(struct usb_gadget *g)
1632{
1633 struct dwc3 *dwc = gadget_to_dwc(g);
1634 unsigned long flags;
1635 int irq;
1636
1637 spin_lock_irqsave(&dwc->lock, flags);
1638
1639 dwc3_gadget_disable_irq(dwc);
1640 __dwc3_gadget_ep_disable(dwc->eps[0]);
1641 __dwc3_gadget_ep_disable(dwc->eps[1]);
1642
1643 dwc->gadget_driver = NULL;
1644
1645 spin_unlock_irqrestore(&dwc->lock, flags);
1646
1647 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1648 free_irq(irq, dwc);
1649
1650 return 0;
1651}
1652
1653static const struct usb_gadget_ops dwc3_gadget_ops = {
1654 .get_frame = dwc3_gadget_get_frame,
1655 .wakeup = dwc3_gadget_wakeup,
1656 .set_selfpowered = dwc3_gadget_set_selfpowered,
1657 .pullup = dwc3_gadget_pullup,
1658 .udc_start = dwc3_gadget_start,
1659 .udc_stop = dwc3_gadget_stop,
1660};
1661
1662/* -------------------------------------------------------------------------- */
1663
1664static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1665 u8 num, u32 direction)
1666{
1667 struct dwc3_ep *dep;
1668 u8 i;
1669
1670 for (i = 0; i < num; i++) {
1671 u8 epnum = (i << 1) | (!!direction);
1672
1673 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1674 if (!dep)
1675 return -ENOMEM;
1676
1677 dep->dwc = dwc;
1678 dep->number = epnum;
1679 dep->direction = !!direction;
1680 dwc->eps[epnum] = dep;
1681
1682 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1683 (epnum & 1) ? "in" : "out");
1684
1685 dep->endpoint.name = dep->name;
1686
1687 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1688
1689 if (epnum == 0 || epnum == 1) {
1690 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1691 dep->endpoint.maxburst = 1;
1692 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1693 if (!epnum)
1694 dwc->gadget.ep0 = &dep->endpoint;
1695 } else {
1696 int ret;
1697
1698 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1699 dep->endpoint.max_streams = 15;
1700 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1701 list_add_tail(&dep->endpoint.ep_list,
1702 &dwc->gadget.ep_list);
1703
1704 ret = dwc3_alloc_trb_pool(dep);
1705 if (ret)
1706 return ret;
1707 }
1708
1709 INIT_LIST_HEAD(&dep->request_list);
1710 INIT_LIST_HEAD(&dep->req_queued);
1711 }
1712
1713 return 0;
1714}
1715
1716static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1717{
1718 int ret;
1719
1720 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1721
1722 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1723 if (ret < 0) {
1724 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1725 return ret;
1726 }
1727
1728 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1729 if (ret < 0) {
1730 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1731 return ret;
1732 }
1733
1734 return 0;
1735}
1736
1737static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1738{
1739 struct dwc3_ep *dep;
1740 u8 epnum;
1741
1742 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1743 dep = dwc->eps[epnum];
1744 if (!dep)
1745 continue;
		/*
		 * Physical endpoints 0 and 1 are special; they form the
		 * bi-directional USB endpoint 0.
		 *
		 * For those two physical endpoints, we don't allocate a TRB
		 * pool nor do we add them to the endpoints list. Due to that,
		 * we shouldn't do these two operations otherwise we would end
		 * up with all sorts of bugs when removing dwc3.ko.
		 */
1755 if (epnum != 0 && epnum != 1) {
1756 dwc3_free_trb_pool(dep);
1757 list_del(&dep->endpoint.ep_list);
1758 }
1759
1760 kfree(dep);
1761 }
1762}
1763
1764/* -------------------------------------------------------------------------- */
1765
1766static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1767 struct dwc3_request *req, struct dwc3_trb *trb,
1768 const struct dwc3_event_depevt *event, int status)
1769{
1770 unsigned int count;
1771 unsigned int s_pkt = 0;
1772 unsigned int trb_status;
1773
1774 trace_dwc3_complete_trb(dep, trb);
1775
1776 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
		/*
		 * We continue despite the error. There is not much we
		 * can do. If we don't clean it up we loop forever. If
		 * we skip the TRB then it gets overwritten after a
		 * while since we use them in a ring buffer. A BUG()
		 * would help. Let's hope that if this occurs, someone
		 * fixes the root cause instead of looking away :)
		 */
1785 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1786 dep->name, trb);
1787 count = trb->size & DWC3_TRB_SIZE_MASK;
1788
1789 if (dep->direction) {
1790 if (count) {
1791 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1792 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1793 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1794 dep->name);
				/*
				 * If missed isoc occurred and there is
				 * no request queued then issue END
				 * TRANSFER, so that core generates
				 * next xfernotready and we will issue
				 * a fresh START TRANSFER.
				 * If there are still queued requests
				 * then wait, do not issue either END
				 * or UPDATE TRANSFER, just attach next
				 * request in request_list during
				 * giveback. If any future queued request
				 * is successfully transferred then we
				 * will issue UPDATE TRANSFER for all
				 * requests in the request_list.
				 */
1810 dep->flags |= DWC3_EP_MISSED_ISOC;
1811 } else {
1812 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1813 dep->name);
1814 status = -ECONNRESET;
1815 }
1816 } else {
1817 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1818 }
1819 } else {
1820 if (count && (event->status & DEPEVT_STATUS_SHORT))
1821 s_pkt = 1;
1822 }
1823
1824 /*
1825 * We assume here we will always receive the entire data block
1826 * which we should receive. Meaning, if we program RX to
1827 * receive 4K but we receive only 2K, we assume that's all we
1828 * should receive and we simply bounce the request back to the
1829 * gadget driver for further processing.
1830 */
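	/*
	 * Illustrative example (single-TRB request): if 4096 bytes were
	 * programmed and the TRB reports count = 2048 bytes still unfilled,
	 * req->request.actual below advances by 4096 - 2048 = 2048.
	 */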
1831 req->request.actual += req->request.length - count;
1832 if (s_pkt)
1833 return 1;
1834 if ((event->status & DEPEVT_STATUS_LST) &&
1835 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1836 DWC3_TRB_CTRL_HWO)))
1837 return 1;
1838 if ((event->status & DEPEVT_STATUS_IOC) &&
1839 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1840 return 1;
1841 return 0;
1842}
1843
1844static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1845 const struct dwc3_event_depevt *event, int status)
1846{
1847 struct dwc3_request *req;
1848 struct dwc3_trb *trb;
1849 unsigned int slot;
1850 unsigned int i;
1851 int ret;
1852
1853 do {
1854 req = next_request(&dep->req_queued);
1855 if (!req) {
1856 WARN_ON_ONCE(1);
1857 return 1;
1858 }
1859 i = 0;
1860 do {
1861 slot = req->start_slot + i;
1862 if ((slot == DWC3_TRB_NUM - 1) &&
1863 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1864 slot++;
1865 slot %= DWC3_TRB_NUM;
1866 trb = &dep->trb_pool[slot];
1867
1868 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1869 event, status);
1870 if (ret)
1871 break;
		} while (++i < req->request.num_mapped_sgs);
1873
1874 dwc3_gadget_giveback(dep, req, status);
1875
1876 if (ret)
1877 break;
1878 } while (1);
1879
1880 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1881 list_empty(&dep->req_queued)) {
1882 if (list_empty(&dep->request_list)) {
1883 /*
1884 * If there is no entry in request list then do
1885 * not issue END TRANSFER now. Just set PENDING
1886 * flag, so that END TRANSFER is issued when an
1887 * entry is added into request list.
1888 */
1889 dep->flags = DWC3_EP_PENDING_REQUEST;
1890 } else {
1891 dwc3_stop_active_transfer(dwc, dep->number, true);
1892 dep->flags = DWC3_EP_ENABLED;
1893 }
1894 return 1;
1895 }
1896
1897 return 1;
1898}
1899
1900static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1901 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1902{
1903 unsigned status = 0;
1904 int clean_busy;
1905
1906 if (event->status & DEPEVT_STATUS_BUSERR)
1907 status = -ECONNRESET;
1908
1909 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1910 if (clean_busy)
1911 dep->flags &= ~DWC3_EP_BUSY;
1912
1913 /*
1914 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1915 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1916 */
1917 if (dwc->revision < DWC3_REVISION_183A) {
1918 u32 reg;
1919 int i;
1920
1921 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1922 dep = dwc->eps[i];
1923
1924 if (!(dep->flags & DWC3_EP_ENABLED))
1925 continue;
1926
1927 if (!list_empty(&dep->req_queued))
1928 return;
1929 }
1930
1931 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1932 reg |= dwc->u1u2;
1933 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1934
1935 dwc->u1u2 = 0;
1936 }
1937}
1938
1939static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1940 const struct dwc3_event_depevt *event)
1941{
1942 struct dwc3_ep *dep;
1943 u8 epnum = event->endpoint_number;
1944
1945 dep = dwc->eps[epnum];
1946
1947 if (!(dep->flags & DWC3_EP_ENABLED))
1948 return;
1949
1950 if (epnum == 0 || epnum == 1) {
1951 dwc3_ep0_interrupt(dwc, event);
1952 return;
1953 }
1954
1955 switch (event->endpoint_event) {
1956 case DWC3_DEPEVT_XFERCOMPLETE:
1957 dep->resource_index = 0;
1958
1959 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1960 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1961 dep->name);
1962 return;
1963 }
1964
1965 dwc3_endpoint_transfer_complete(dwc, dep, event);
1966 break;
1967 case DWC3_DEPEVT_XFERINPROGRESS:
1968 dwc3_endpoint_transfer_complete(dwc, dep, event);
1969 break;
1970 case DWC3_DEPEVT_XFERNOTREADY:
1971 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1972 dwc3_gadget_start_isoc(dwc, dep, event);
1973 } else {
1974 int ret;
1975
1976 dev_vdbg(dwc->dev, "%s: reason %s\n",
1977 dep->name, event->status &
1978 DEPEVT_STATUS_TRANSFER_ACTIVE
1979 ? "Transfer Active"
1980 : "Transfer Not Active");
1981
1982 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1983 if (!ret || ret == -EBUSY)
1984 return;
1985
1986 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1987 dep->name);
1988 }
1989
1990 break;
1991 case DWC3_DEPEVT_STREAMEVT:
1992 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1993 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1994 dep->name);
1995 return;
1996 }
1997
1998 switch (event->status) {
1999 case DEPEVT_STREAMEVT_FOUND:
2000 dev_vdbg(dwc->dev, "Stream %d found and started\n",
2001 event->parameters);
2002
2003 break;
2004 case DEPEVT_STREAMEVT_NOTFOUND:
2005 /* FALLTHROUGH */
2006 default:
2007 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2008 }
2009 break;
2010 case DWC3_DEPEVT_RXTXFIFOEVT:
2011 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2012 break;
2013 case DWC3_DEPEVT_EPCMDCMPLT:
2014 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
2015 break;
2016 }
2017}
2018
2019static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2020{
2021 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2022 spin_unlock(&dwc->lock);
2023 dwc->gadget_driver->disconnect(&dwc->gadget);
2024 spin_lock(&dwc->lock);
2025 }
2026}
2027
2028static void dwc3_suspend_gadget(struct dwc3 *dwc)
2029{
2030 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2031 spin_unlock(&dwc->lock);
2032 dwc->gadget_driver->suspend(&dwc->gadget);
2033 spin_lock(&dwc->lock);
2034 }
2035}
2036
2037static void dwc3_resume_gadget(struct dwc3 *dwc)
2038{
2039 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2040 spin_unlock(&dwc->lock);
2041 dwc->gadget_driver->resume(&dwc->gadget);
2042 }
2043}
2044
2045static void dwc3_reset_gadget(struct dwc3 *dwc)
2046{
2047 if (!dwc->gadget_driver)
2048 return;
2049
2050 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2051 spin_unlock(&dwc->lock);
2052 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2053 spin_lock(&dwc->lock);
2054 }
2055}
2056
2057static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2058{
2059 struct dwc3_ep *dep;
2060 struct dwc3_gadget_ep_cmd_params params;
2061 u32 cmd;
2062 int ret;
2063
2064 dep = dwc->eps[epnum];
2065
2066 if (!dep->resource_index)
2067 return;
2068
2069 /*
2070 * NOTICE: We are violating what the Databook says about the
2071 * EndTransfer command. Ideally we would _always_ wait for the
2072 * EndTransfer Command Completion IRQ, but that's causing too
2073 * much trouble synchronizing between us and gadget driver.
2074 *
2075 * We have discussed this with the IP Provider and it was
2076 * suggested to giveback all requests here, but give HW some
2077 * extra time to synchronize with the interconnect. We're using
2078	 * an arbitrary 100us delay for that.
2079 *
2080 * Note also that a similar handling was tested by Synopsys
2081 * (thanks a lot Paul) and nothing bad has come out of it.
2082 * In short, what we're doing is:
2083 *
2084 * - Issue EndTransfer WITH CMDIOC bit set
2085 * - Wait 100us
2086 */
2087
2088 cmd = DWC3_DEPCMD_ENDTRANSFER;
2089 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2090 cmd |= DWC3_DEPCMD_CMDIOC;
2091 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2092 memset(&params, 0, sizeof(params));
2093 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2094 WARN_ON_ONCE(ret);
2095 dep->resource_index = 0;
2096 dep->flags &= ~DWC3_EP_BUSY;
2097 udelay(100);
2098}
2099
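/*
 * Cancel outstanding requests on every enabled endpoint except the two
 * physical endpoints backing ep0 (0 and 1), which are handled separately.
 */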
2100static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2101{
2102 u32 epnum;
2103
2104 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2105 struct dwc3_ep *dep;
2106
2107 dep = dwc->eps[epnum];
2108 if (!dep)
2109 continue;
2110
2111 if (!(dep->flags & DWC3_EP_ENABLED))
2112 continue;
2113
2114 dwc3_remove_requests(dwc, dep);
2115 }
2116}
2117
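/*
 * Issue a ClearStall endpoint command for every endpoint that is currently
 * flagged as halted, clearing its software STALL flag first.
 */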
2118static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2119{
2120 u32 epnum;
2121
2122 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2123 struct dwc3_ep *dep;
2124 struct dwc3_gadget_ep_cmd_params params;
2125 int ret;
2126
2127 dep = dwc->eps[epnum];
2128 if (!dep)
2129 continue;
2130
2131 if (!(dep->flags & DWC3_EP_STALL))
2132 continue;
2133
2134 dep->flags &= ~DWC3_EP_STALL;
2135
2136 memset(&params, 0, sizeof(params));
2137 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2138 DWC3_DEPCMD_CLEARSTALL, &params);
2139 WARN_ON_ONCE(ret);
2140 }
2141}
2142
2143static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2144{
2145	u32 reg;
2146
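	/*
	 * Stop initiating U1/U2 link power state transitions; the link is
	 * going away, so there is nothing left to negotiate.
	 */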
2147 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2148 reg &= ~DWC3_DCTL_INITU1ENA;
2149 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2150
2151 reg &= ~DWC3_DCTL_INITU2ENA;
2152 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2153
2154 dwc3_disconnect_gadget(dwc);
2155 dwc->start_config_issued = false;
2156
2157 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2158 dwc->setup_packet_pending = false;
2159 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2160}
2161
2162static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2163{
2164 u32 reg;
2165
2166 /*
2167 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2168 * would cause a missing Disconnect Event if there's a
2169 * pending Setup Packet in the FIFO.
2170 *
2171 * There's no suggested workaround on the official Bug
2172 * report, which states that "unless the driver/application
2173 * is doing any special handling of a disconnect event,
2174 * there is no functional issue".
2175 *
2176 * Unfortunately, it turns out that we _do_ some special
2177 * handling of a disconnect event, namely complete all
2178 * pending transfers, notify gadget driver of the
2179 * disconnection, and so on.
2180 *
2181	 * Our suggested workaround is to follow the Disconnect
2182	 * Event steps here instead, gated on a setup_packet_pending
2183	 * flag. That flag gets set whenever we see an XferNotReady
2184	 * event on EP0 and gets cleared on XferComplete for the
2185	 * same endpoint.
2186 *
2187 * Refers to:
2188 *
2189 * STAR#9000466709: RTL: Device : Disconnect event not
2190 * generated if setup packet pending in FIFO
2191 */
2192 if (dwc->revision < DWC3_REVISION_188A) {
2193 if (dwc->setup_packet_pending)
2194 dwc3_gadget_disconnect_interrupt(dwc);
2195 }
2196
2197 dwc3_reset_gadget(dwc);
2198
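	/* Leave any USB2 test mode that may still be active */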
2199 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2200 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2201 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2202 dwc->test_mode = false;
2203
2204 dwc3_stop_active_transfers(dwc);
2205 dwc3_clear_stall_all_ep(dwc);
2206 dwc->start_config_issued = false;
2207
2208 /* Reset device address to zero */
2209 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2210 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2211 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2212}
2213
2214static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2215{
2216 u32 reg;
2217 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2218
2219 /*
2220	 * We change the clock only for SuperSpeed connections. The reason is
2221	 * not entirely clear; it may become part of a power saving plan.
2222 */
2223
2224 if (speed != DWC3_DSTS_SUPERSPEED)
2225 return;
2226
2227 /*
2228 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2229 * each time on Connect Done.
2230 */
2231 if (!usb30_clock)
2232 return;
2233
2234 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2235 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2236 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2237}
2238
2239static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2240{
2241 struct dwc3_ep *dep;
2242 int ret;
2243 u32 reg;
2244 u8 speed;
2245
2246 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2247 speed = reg & DWC3_DSTS_CONNECTSPD;
2248 dwc->speed = speed;
2249
2250 dwc3_update_ram_clk_sel(dwc, speed);
2251
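	/*
	 * ep0's maximum packet size is fixed by the negotiated speed:
	 * 512 bytes for SuperSpeed, 64 for High/Full Speed, 8 for Low Speed.
	 */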
2252 switch (speed) {
2253 case DWC3_DCFG_SUPERSPEED:
2254 /*
2255 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2256 * would cause a missing USB3 Reset event.
2257 *
2258 * In such situations, we should force a USB3 Reset
2259 * event by calling our dwc3_gadget_reset_interrupt()
2260 * routine.
2261 *
2262 * Refers to:
2263 *
2264 * STAR#9000483510: RTL: SS : USB3 reset event may
2265 * not be generated always when the link enters poll
2266 */
2267 if (dwc->revision < DWC3_REVISION_190A)
2268 dwc3_gadget_reset_interrupt(dwc);
2269
2270 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2271 dwc->gadget.ep0->maxpacket = 512;
2272 dwc->gadget.speed = USB_SPEED_SUPER;
2273 break;
2274 case DWC3_DCFG_HIGHSPEED:
2275 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2276 dwc->gadget.ep0->maxpacket = 64;
2277 dwc->gadget.speed = USB_SPEED_HIGH;
2278 break;
2279 case DWC3_DCFG_FULLSPEED2:
2280 case DWC3_DCFG_FULLSPEED1:
2281 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2282 dwc->gadget.ep0->maxpacket = 64;
2283 dwc->gadget.speed = USB_SPEED_FULL;
2284 break;
2285 case DWC3_DCFG_LOWSPEED:
2286 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2287 dwc->gadget.ep0->maxpacket = 8;
2288 dwc->gadget.speed = USB_SPEED_LOW;
2289 break;
2290 }
2291
2292 /* Enable USB2 LPM Capability */
2293
2294 if ((dwc->revision > DWC3_REVISION_194A)
2295 && (speed != DWC3_DCFG_SUPERSPEED)) {
2296 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2297 reg |= DWC3_DCFG_LPM_CAP;
2298 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2299
2300 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2301 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2302
2303 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2304
2305 /*
2306	 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled and
2307	 * DCFG.LPMCap is set, the core responds with an ACK if the
2308	 * BESL value in the LPM token is less than or equal to the
2309	 * LPM NYET threshold.
2310 */
2311 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2312 && dwc->has_lpm_erratum,
2313		  "LPM Erratum not available on dwc3 revisions < 2.40a\n");
2314
2315 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2316 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2317
2318 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2319 } else {
2320 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2321 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2322 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2323 }
2324
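	/*
	 * Re-enable both directions of ep0 (physical endpoints 0 and 1)
	 * with the maximum packet size selected above.
	 */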
2325 dep = dwc->eps[0];
2326 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2327 false);
2328 if (ret) {
2329 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2330 return;
2331 }
2332
2333 dep = dwc->eps[1];
2334 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2335 false);
2336 if (ret) {
2337 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2338 return;
2339 }
2340
2341 /*
2342 * Configure PHY via GUSB3PIPECTLn if required.
2343 *
2344 * Update GTXFIFOSIZn
2345 *
2346 * In both cases reset values should be sufficient.
2347 */
2348}
2349
2350static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2351{
2352 /*
2353 * TODO take core out of low power mode when that's
2354 * implemented.
2355 */
2356
2357	dwc3_resume_gadget(dwc);
2358}
2359
2360static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2361 unsigned int evtinfo)
2362{
2363 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2364 unsigned int pwropt;
2365
2366 /*
2367	 * WORKAROUND: DWC3 revisions < 2.50a, when configured without
2368	 * Hibernation mode enabled, have an issue which shows up when the
2369	 * device detects a host-initiated U3 exit.
2370	 *
2371	 * In that case, the device will generate a Link State Change
2372	 * Interrupt from U3 to RESUME which is only necessary if
2373	 * Hibernation is configured in.
2374 *
2375 * There are no functional changes due to such spurious event and we
2376 * just need to ignore it.
2377 *
2378 * Refers to:
2379 *
2380 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2381 * operational mode
2382 */
2383 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2384 if ((dwc->revision < DWC3_REVISION_250A) &&
2385 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2386 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2387 (next == DWC3_LINK_STATE_RESUME)) {
2388 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2389 return;
2390 }
2391 }
2392
2393 /*
2394	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending on
2395	 * the link partner, the USB session might enter and exit low power
2396	 * states multiple times before a transfer takes place.
2397 *
2398 * Due to this problem, we might experience lower throughput. The
2399 * suggested workaround is to disable DCTL[12:9] bits if we're
2400 * transitioning from U1/U2 to U0 and enable those bits again
2401 * after a transfer completes and there are no pending transfers
2402 * on any of the enabled endpoints.
2403 *
2404 * This is the first half of that workaround.
2405 *
2406 * Refers to:
2407 *
2408 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2409 * core send LGO_Ux entering U0
2410 */
2411 if (dwc->revision < DWC3_REVISION_183A) {
2412 if (next == DWC3_LINK_STATE_U0) {
2413 u32 u1u2;
2414 u32 reg;
2415
2416 switch (dwc->link_state) {
2417 case DWC3_LINK_STATE_U1:
2418 case DWC3_LINK_STATE_U2:
2419 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2420 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2421 | DWC3_DCTL_ACCEPTU2ENA
2422 | DWC3_DCTL_INITU1ENA
2423 | DWC3_DCTL_ACCEPTU1ENA);
2424
2425 if (!dwc->u1u2)
2426 dwc->u1u2 = reg & u1u2;
2427
2428 reg &= ~u1u2;
2429
2430 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2431 break;
2432 default:
2433 /* do nothing */
2434 break;
2435 }
2436 }
2437 }
2438
2439 switch (next) {
2440 case DWC3_LINK_STATE_U1:
2441		/* dwc->speed holds the raw DSTS value; use the gadget speed here */
		if (dwc->gadget.speed == USB_SPEED_SUPER)
2442 dwc3_suspend_gadget(dwc);
2443 break;
2444 case DWC3_LINK_STATE_U2:
2445 case DWC3_LINK_STATE_U3:
2446 dwc3_suspend_gadget(dwc);
2447 break;
2448 case DWC3_LINK_STATE_RESUME:
2449 dwc3_resume_gadget(dwc);
2450 break;
2451 default:
2452 /* do nothing */
2453 break;
2454 }
2455
2456 dwc->link_state = next;
2457}
2458
2459static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2460 unsigned int evtinfo)
2461{
2462 unsigned int is_ss = evtinfo & BIT(4);
2463
2464 /**
2465	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2466	 * has a known issue which can cause USB CV TD.9.23 to fail
2467 * randomly.
2468 *
2469 * Because of this issue, core could generate bogus hibernation
2470 * events which SW needs to ignore.
2471 *
2472 * Refers to:
2473 *
2474 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2475 * Device Fallback from SuperSpeed
2476 */
2477 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2478 return;
2479
2480 /* enter hibernation here */
2481}
2482
2483static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2484 const struct dwc3_event_devt *event)
2485{
2486 switch (event->type) {
2487 case DWC3_DEVICE_EVENT_DISCONNECT:
2488 dwc3_gadget_disconnect_interrupt(dwc);
2489 break;
2490 case DWC3_DEVICE_EVENT_RESET:
2491 dwc3_gadget_reset_interrupt(dwc);
2492 break;
2493 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2494 dwc3_gadget_conndone_interrupt(dwc);
2495 break;
2496 case DWC3_DEVICE_EVENT_WAKEUP:
2497 dwc3_gadget_wakeup_interrupt(dwc);
2498 break;
2499 case DWC3_DEVICE_EVENT_HIBER_REQ:
2500 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2501 "unexpected hibernation event\n"))
2502 break;
2503
2504 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2505 break;
2506 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2507 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2508 break;
2509 case DWC3_DEVICE_EVENT_EOPF:
2510 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2511 break;
2512 case DWC3_DEVICE_EVENT_SOF:
2513 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2514 break;
2515 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2516 dev_vdbg(dwc->dev, "Erratic Error\n");
2517 break;
2518 case DWC3_DEVICE_EVENT_CMD_CMPL:
2519 dev_vdbg(dwc->dev, "Command Complete\n");
2520 break;
2521 case DWC3_DEVICE_EVENT_OVERFLOW:
2522 dev_vdbg(dwc->dev, "Overflow\n");
2523 break;
2524 default:
2525 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2526 }
2527}
2528
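/*
 * Dispatch a single event buffer entry: endpoint-specific events go to the
 * endpoint handler, device-wide events to the gadget handler.
 */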
2529static void dwc3_process_event_entry(struct dwc3 *dwc,
2530 const union dwc3_event *event)
2531{
2532 trace_dwc3_event(event->raw);
2533
2534 /* Endpoint IRQ, handle it and return early */
2535 if (event->type.is_devspec == 0) {
2536 /* depevt */
2537 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2538 }
2539
2540 switch (event->type.type) {
2541 case DWC3_EVENT_TYPE_DEV:
2542 dwc3_gadget_interrupt(dwc, &event->devt);
2543 break;
2544 /* REVISIT what to do with Carkit and I2C events ? */
2545 default:
2546 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2547 }
2548}
2549
2550static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2551{
2552 struct dwc3_event_buffer *evt;
2553 irqreturn_t ret = IRQ_NONE;
2554 int left;
2555 u32 reg;
2556
2557 evt = dwc->ev_buffs[buf];
2558 left = evt->count;
2559
2560 if (!(evt->flags & DWC3_EVENT_PENDING))
2561 return IRQ_NONE;
2562
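	/*
	 * Consume the buffer one 4-byte event at a time; each write of 4 to
	 * GEVNTCOUNT tells the controller how many bytes have been processed.
	 */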
2563 while (left > 0) {
2564 union dwc3_event event;
2565
2566 event.raw = *(u32 *) (evt->buf + evt->lpos);
2567
2568 dwc3_process_event_entry(dwc, &event);
2569
2570 /*
2571	 * FIXME: we wrap around to the next entry assuming that
2572	 * almost all entries are 4 bytes in size. There is one
2573	 * entry which has 12 bytes: a regular entry followed by
2574	 * 8 bytes of data. At the moment it is unclear how such an
2575	 * entry is laid out when it crosses the end of the buffer,
2576	 * so that case will have to be revisited once we try to
2577	 * handle it.
2578 */
2579 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2580 left -= 4;
2581
2582 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2583 }
2584
2585 evt->count = 0;
2586 evt->flags &= ~DWC3_EVENT_PENDING;
2587 ret = IRQ_HANDLED;
2588
2589 /* Unmask interrupt */
2590 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2591 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2592 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2593
2594 return ret;
2595}
2596
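/*
 * Threaded half of the interrupt handler: drain every event buffer that the
 * hard IRQ handler marked as pending, with the controller lock held.
 */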
2597static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2598{
2599 struct dwc3 *dwc = _dwc;
2600 unsigned long flags;
2601 irqreturn_t ret = IRQ_NONE;
2602 int i;
2603
2604 spin_lock_irqsave(&dwc->lock, flags);
2605
2606 for (i = 0; i < dwc->num_event_buffers; i++)
2607 ret |= dwc3_process_event_buf(dwc, i);
2608
2609 spin_unlock_irqrestore(&dwc->lock, flags);
2610
2611 return ret;
2612}
2613
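/*
 * Hard IRQ half of the interrupt handler: latch the number of pending events
 * for this buffer, mask its interrupt and defer to the threaded handler.
 */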
2614static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2615{
2616 struct dwc3_event_buffer *evt;
2617 u32 count;
2618 u32 reg;
2619
2620 evt = dwc->ev_buffs[buf];
2621
2622 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2623 count &= DWC3_GEVNTCOUNT_MASK;
2624 if (!count)
2625 return IRQ_NONE;
2626
2627 evt->count = count;
2628 evt->flags |= DWC3_EVENT_PENDING;
2629
2630 /* Mask interrupt */
2631 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2632 reg |= DWC3_GEVNTSIZ_INTMASK;
2633 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2634
2635 return IRQ_WAKE_THREAD;
2636}
2637
2638static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2639{
2640 struct dwc3 *dwc = _dwc;
2641 int i;
2642 irqreturn_t ret = IRQ_NONE;
2643
2644 spin_lock(&dwc->lock);
2645
2646 for (i = 0; i < dwc->num_event_buffers; i++) {
2647 irqreturn_t status;
2648
2649 status = dwc3_check_event_buf(dwc, i);
2650 if (status == IRQ_WAKE_THREAD)
2651 ret = status;
2652 }
2653
2654 spin_unlock(&dwc->lock);
2655
2656 return ret;
2657}
2658
2659/**
2660 * dwc3_gadget_init - Initializes gadget related registers
2661 * @dwc: pointer to our controller context structure
2662 *
2663 * Returns 0 on success otherwise negative errno.
2664 */
2665int dwc3_gadget_init(struct dwc3 *dwc)
2666{
2667 int ret;
2668
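	/*
	 * Allocate the DMA-coherent buffers used by ep0: the control
	 * request, the ep0 TRB and an ep0 bounce buffer.
	 */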
2669 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2670 &dwc->ctrl_req_addr, GFP_KERNEL);
2671 if (!dwc->ctrl_req) {
2672 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2673 ret = -ENOMEM;
2674 goto err0;
2675 }
2676
2677 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2678 &dwc->ep0_trb_addr, GFP_KERNEL);
2679 if (!dwc->ep0_trb) {
2680 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2681 ret = -ENOMEM;
2682 goto err1;
2683 }
2684
2685 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2686 if (!dwc->setup_buf) {
2687 ret = -ENOMEM;
2688 goto err2;
2689 }
2690
2691 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2692 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2693 GFP_KERNEL);
2694 if (!dwc->ep0_bounce) {
2695 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2696 ret = -ENOMEM;
2697 goto err3;
2698 }
2699
2700 dwc->gadget.ops = &dwc3_gadget_ops;
2701 dwc->gadget.max_speed = USB_SPEED_SUPER;
2702 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2703 dwc->gadget.sg_supported = true;
2704 dwc->gadget.name = "dwc3-gadget";
2705
2706 /*
2707 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2708 * on ep out.
2709 */
2710 dwc->gadget.quirk_ep_out_aligned_size = true;
2711
2712 /*
2713 * REVISIT: Here we should clear all pending IRQs to be
2714 * sure we're starting from a well known location.
2715 */
2716
2717 ret = dwc3_gadget_init_endpoints(dwc);
2718 if (ret)
2719 goto err4;
2720
2721 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2722 if (ret) {
2723 dev_err(dwc->dev, "failed to register udc\n");
2724 goto err4;
2725 }
2726
2727 return 0;
2728
2729err4:
2730 dwc3_gadget_free_endpoints(dwc);
2731 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2732 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2733
2734err3:
2735 kfree(dwc->setup_buf);
2736
2737err2:
2738 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2739 dwc->ep0_trb, dwc->ep0_trb_addr);
2740
2741err1:
2742 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2743 dwc->ctrl_req, dwc->ctrl_req_addr);
2744
2745err0:
2746 return ret;
2747}
2748
2749/* -------------------------------------------------------------------------- */
2750
2751void dwc3_gadget_exit(struct dwc3 *dwc)
2752{
2753 usb_del_gadget_udc(&dwc->gadget);
2754
2755 dwc3_gadget_free_endpoints(dwc);
2756
2757 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2758 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2759
2760 kfree(dwc->setup_buf);
2761
2762 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2763 dwc->ep0_trb, dwc->ep0_trb_addr);
2764
2765 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2766 dwc->ctrl_req, dwc->ctrl_req_addr);
2767}
2768
2769int dwc3_gadget_suspend(struct dwc3 *dwc)
2770{
2771 if (dwc->pullups_connected) {
2772 dwc3_gadget_disable_irq(dwc);
2773 dwc3_gadget_run_stop(dwc, true, true);
2774 }
2775
2776 __dwc3_gadget_ep_disable(dwc->eps[0]);
2777 __dwc3_gadget_ep_disable(dwc->eps[1]);
2778
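	/* Save DCFG so dwc3_gadget_resume() can restore the device configuration */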
2779 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2780
2781 return 0;
2782}
2783
2784int dwc3_gadget_resume(struct dwc3 *dwc)
2785{
2786 struct dwc3_ep *dep;
2787 int ret;
2788
2789 /* Start with SuperSpeed Default */
2790 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2791
2792 dep = dwc->eps[0];
2793 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2794 false);
2795 if (ret)
2796 goto err0;
2797
2798 dep = dwc->eps[1];
2799 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2800 false);
2801 if (ret)
2802 goto err1;
2803
2804 /* begin to receive SETUP packets */
2805 dwc->ep0state = EP0_SETUP_PHASE;
2806 dwc3_ep0_out_start(dwc);
2807
2808 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2809
2810 if (dwc->pullups_connected) {
2811 dwc3_gadget_enable_irq(dwc);
2812 dwc3_gadget_run_stop(dwc, true, false);
2813 }
2814
2815 return 0;
2816
2817err1:
2818 __dwc3_gadget_ep_disable(dwc->eps[0]);
2819
2820err0:
2821 return ret;
2822}