
1 // SPDX-License-Identifier: GPL-2.0
11 * S3C USB2.0 High-speed / OtG driver
19 #include <linux/dma-mapping.h>
65 return hsotg->eps_in[ep_index];
67 return hsotg->eps_out[ep_index];
74 * using_dma - return the DMA status of the driver.
94 return hsotg->params.g_dma;
98 * using_desc_dma - return the descriptor DMA status of the driver.
105 return hsotg->params.g_dma_desc;
109 * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
117 struct dwc2_hsotg *hsotg = hs_ep->parent;
120 if (hsotg->gadget.speed != USB_SPEED_HIGH)
123 hs_ep->target_frame += hs_ep->interval;
124 if (hs_ep->target_frame > limit) {
125 hs_ep->frame_overrun = true;
126 hs_ep->target_frame &= limit;
128 hs_ep->frame_overrun = false;
133 * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
138  * descriptor frame number field value. For service interval mode frame
144 struct dwc2_hsotg *hsotg = hs_ep->parent;
147 if (hsotg->gadget.speed != USB_SPEED_HIGH)
150 if (hs_ep->target_frame)
151 hs_ep->target_frame -= 1;
153 hs_ep->target_frame = limit;
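
A minimal standalone sketch of the wraparound arithmetic behind dwc2_gadget_incr_frame_num() and dwc2_gadget_dec_frame_num_by_one() above; the 0x3fff SOFFN limit and the ">> 3" full-speed adjustment are assumptions carried over from the fragments, and all names here are local to the sketch rather than the driver's.

#include <stdbool.h>
#include <stdint.h>

#define SOFFN_LIMIT 0x3fffu	/* assumed 14-bit (micro)frame counter limit */

struct frame_state {
	uint32_t target_frame;
	uint32_t interval;
	bool frame_overrun;
	bool high_speed;
};

/* advance the target frame by one interval, wrapping at the counter limit */
static void incr_frame_num(struct frame_state *fs)
{
	uint32_t limit = SOFFN_LIMIT;

	if (!fs->high_speed)
		limit >>= 3;	/* full speed counts frames, not microframes */

	fs->target_frame += fs->interval;
	if (fs->target_frame > limit) {
		fs->frame_overrun = true;
		fs->target_frame &= limit;
	} else {
		fs->frame_overrun = false;
	}
}

/* step the target frame back by one, wrapping below zero to the limit */
static void dec_frame_num_by_one(struct frame_state *fs)
{
	uint32_t limit = fs->high_speed ? SOFFN_LIMIT : SOFFN_LIMIT >> 3;

	if (fs->target_frame)
		fs->target_frame -= 1;
	else
		fs->target_frame = limit;
}
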
157 * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
169 dev_dbg(hsotg->dev, "gsintmsk now 0x%08x\n", new_gsintmsk);
175 * dwc2_hsotg_disable_gsint - disable one or more of the general interrupt
191 * dwc2_hsotg_ctrl_epint - enable/disable an endpoint irq
222 * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode
228 if (hsotg->hw_params.en_multiple_tx_fifo)
229 /* In dedicated FIFO mode we need count of IN EPs */
230 return hsotg->hw_params.num_dev_in_eps;
232 /* In shared FIFO mode we need count of Periodic IN EPs */
233 return hsotg->hw_params.num_dev_perio_in_ep;
237 * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
238 * device mode TX FIFOs
248 np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size,
249 hsotg->params.g_np_tx_fifo_size);
252 tx_addr_max = hsotg->hw_params.total_fifo_size;
254 addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
258 return tx_addr_max - addr;
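
A hedged sketch of the budget arithmetic in dwc2_hsotg_tx_fifo_total_depth() above: total FIFO space minus the RX FIFO and the (hardware-capped) non-periodic TX FIFO is what remains for the other device-mode TX FIFOs. Names and units are local to the sketch, not the register definitions.

#include <stdint.h>

static uint32_t tx_fifo_total_depth(uint32_t total_fifo_size,
				    uint32_t hw_np_tx_fifo_size,
				    uint32_t cfg_np_tx_fifo_size,
				    uint32_t rx_fifo_size)
{
	/* the configured non-periodic TX FIFO may not exceed the hardware's */
	uint32_t np_tx = cfg_np_tx_fifo_size < hw_np_tx_fifo_size ?
			 cfg_np_tx_fifo_size : hw_np_tx_fifo_size;

	/* whatever is left is available for the remaining TX FIFOs */
	return total_fifo_size - (rx_fifo_size + np_tx);
}
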
262 * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
277 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
284 * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
305 * dwc2_hsotg_init_fifo - initialise non-periodic FIFOs
315 u32 *txfsz = hsotg->params.g_tx_fifo_size;
318 WARN_ON(hsotg->fifo_map);
319 hsotg->fifo_map = 0;
322 dwc2_writel(hsotg, hsotg->params.g_rx_fifo_size, GRXFSIZ);
323 dwc2_writel(hsotg, (hsotg->params.g_rx_fifo_size <<
325 (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
336 addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
348 WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
356 dwc2_writel(hsotg, hsotg->hw_params.total_fifo_size |
375 if (--timeout == 0) {
376 dev_err(hsotg->dev,
385 dev_dbg(hsotg->dev, "FIFOs reset, timeout at %d\n", timeout);
389  * dwc2_hsotg_ep_alloc_request - allocate USB request structure
404 INIT_LIST_HEAD(&req->queue);
406 return &req->req;
410 * is_ep_periodic - return true if the endpoint is in periodic mode.
413 * Returns true if the endpoint is in periodic mode, meaning it is being
418 return hs_ep->periodic;
422 * dwc2_hsotg_unmap_dma - unmap the DMA memory being used for the request
434 struct usb_request *req = &hs_req->req;
436 usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
440 * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
449 hsotg->setup_desc[0] =
450 dmam_alloc_coherent(hsotg->dev,
452 &hsotg->setup_desc_dma[0],
454 if (!hsotg->setup_desc[0])
457 hsotg->setup_desc[1] =
458 dmam_alloc_coherent(hsotg->dev,
460 &hsotg->setup_desc_dma[1],
462 if (!hsotg->setup_desc[1])
465 hsotg->ctrl_in_desc =
466 dmam_alloc_coherent(hsotg->dev,
468 &hsotg->ctrl_in_desc_dma,
470 if (!hsotg->ctrl_in_desc)
473 hsotg->ctrl_out_desc =
474 dmam_alloc_coherent(hsotg->dev,
476 &hsotg->ctrl_out_desc_dma,
478 if (!hsotg->ctrl_out_desc)
484 return -ENOMEM;
488  * dwc2_hsotg_write_fifo - write packet data to the TxFIFO
499 * otherwise -ENOSPC is returned if the FIFO space was used up.
509 int buf_pos = hs_req->req.actual;
510 int to_write = hs_ep->size_loaded;
516 to_write -= (buf_pos - hs_ep->last_load);
522 if (periodic && !hsotg->dedicated_fifos) {
523 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
538 if (hs_ep->fifo_load != 0) {
540 return -ENOSPC;
543 dev_dbg(hsotg->dev, "%s: left=%d, load=%d, fifo=%d, size %d\n",
545 hs_ep->size_loaded, hs_ep->fifo_load, hs_ep->fifo_size);
548 size_done = hs_ep->size_loaded - size_left;
551 can_write = hs_ep->fifo_load - size_done;
552 dev_dbg(hsotg->dev, "%s: => can_write1=%d\n",
555 can_write = hs_ep->fifo_size - can_write;
556 dev_dbg(hsotg->dev, "%s: => can_write2=%d\n",
561 return -ENOSPC;
563 } else if (hsotg->dedicated_fifos && hs_ep->index != 0) {
565 DTXFSTS(hs_ep->fifo_index));
571 dev_dbg(hsotg->dev,
576 return -ENOSPC;
583 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc;
585 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n",
589 * limit to 512 bytes of data, it seems at least on the non-periodic
597 * limit the write to one max-packet size worth of data, but allow
605 if (!hsotg->dedicated_fifos)
626 to_write -= pkt_round;
634 if (!hsotg->dedicated_fifos)
640 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n",
641 to_write, hs_req->req.length, can_write, buf_pos);
644 return -ENOSPC;
646 hs_req->req.actual = buf_pos + to_write;
647 hs_ep->total_data += to_write;
650 hs_ep->fifo_load += to_write;
653 data = hs_req->req.buf + buf_pos;
655 dwc2_writel_rep(hsotg, EPFIFO(hs_ep->index), data, to_write);
657 return (to_write >= can_write) ? -ENOSPC : 0;
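
A standalone sketch of the "round a partial write down to whole packets" step used by dwc2_hsotg_write_fifo() above, assuming max_transfer is maxpacket * mc as in the fragment; a zero result means nothing can be loaded yet.

#include <stdint.h>

static uint32_t clamp_fifo_write(uint32_t to_write, uint32_t can_write,
				 uint32_t max_transfer)
{
	/* never load more than one maximal transfer at a time */
	if (to_write > max_transfer)
		to_write = max_transfer;

	if (to_write > can_write) {
		uint32_t pkt_round;

		to_write = can_write;
		/* round down to an exact number of max_transfer-sized chunks */
		pkt_round = to_write % max_transfer;
		if (pkt_round)
			to_write -= pkt_round;
	}
	return to_write;
}
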
661  * get_ep_limit - get the maximum data length for this endpoint
669 int index = hs_ep->index;
678 if (hs_ep->dir_in)
685 maxpkt--;
686 maxsize--;
693 if ((maxpkt * hs_ep->ep.maxpacket) < maxsize)
694 maxsize = maxpkt * hs_ep->ep.maxpacket;
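
A sketch of the clamp get_ep_limit() computes above: one programmed transfer is bounded both by the XferSize field width and by PktCnt * maxpacket, and the smaller bound wins. The field limits are plain parameters here rather than the DXEPTSIZ constants.

#include <stdint.h>

static uint32_t ep_transfer_limit(uint32_t xfersize_limit,
				  uint32_t pktcnt_limit,
				  uint32_t maxpacket)
{
	uint32_t by_packets = pktcnt_limit * maxpacket;

	/* constrain by packet count if that is the tighter bound */
	return by_packets < xfersize_limit ? by_packets : xfersize_limit;
}
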
700 * dwc2_hsotg_read_frameno - read current frame number
717 * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
727 const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
728 int is_isoc = hs_ep->isochronous;
730 u32 mps = hs_ep->ep.maxpacket;
731 int dir_in = hs_ep->dir_in;
734 maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
741 if (hs_ep->index)
749 * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
756 * Control out - MPS,
757 * Isochronous - descriptor rx/tx bytes bitfield limit,
758 * Control In/Bulk/Interrupt - multiple of mps. This will allow to not
760 * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds
767 const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
768 u32 mps = hs_ep->ep.maxpacket;
769 int dir_in = hs_ep->dir_in;
772 if (!hs_ep->index && !dir_in) {
775 } else if (hs_ep->isochronous) {
788 desc_size -= desc_size % mps;
792 if (hs_ep->index)
807 int dir_in = hs_ep->dir_in;
808 u32 mps = hs_ep->ep.maxpacket;
816 hs_ep->desc_count = (len / maxsize) +
819 hs_ep->desc_count = 1;
821 for (i = 0; i < hs_ep->desc_count; ++i) {
822 (*desc)->status = 0;
823 (*desc)->status |= (DEV_DMA_BUFF_STS_HBUSY
827 if (!hs_ep->index && !dir_in)
828 (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
830 (*desc)->status |=
832 (*desc)->buf = dma_buff + offset;
834 len -= maxsize;
838 (*desc)->status |= (DEV_DMA_L | DEV_DMA_IOC);
841 (*desc)->status |= (len % mps) ? DEV_DMA_SHORT :
842 ((hs_ep->send_zlp && true_last) ?
845 (*desc)->status |=
847 (*desc)->buf = dma_buff + offset;
850 (*desc)->status &= ~DEV_DMA_BUFF_STS_MASK;
851 (*desc)->status |= (DEV_DMA_BUFF_STS_HREADY
858 * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
872 struct dwc2_dma_desc *desc = hs_ep->desc_list;
877 if (hs_ep->req)
878 ureq = &hs_ep->req->req;
880 /* non-DMA sg buffer */
881 if (!ureq || !ureq->num_sgs) {
888 for_each_sg(ureq->sg, sg, ureq->num_mapped_sgs, i) {
890 sg_dma_address(sg) + sg->offset, sg_dma_len(sg),
891 (i == (ureq->num_mapped_sgs - 1)));
892 desc_count += hs_ep->desc_count;
895 hs_ep->desc_count = desc_count;
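
A self-contained sketch of splitting one buffer across a DMA descriptor chain, in the spirit of the fill loop and dwc2_gadget_config_nonisoc_xfer_ddma() above. The struct layout and the L/IOC bit positions are placeholders, not the controller's real descriptor format.

#include <stddef.h>
#include <stdint.h>

struct demo_dma_desc {
	uint32_t status;
	uint32_t buf;
};

#define DEMO_DMA_L	(1u << 27)	/* "last descriptor" (placeholder bit) */
#define DEMO_DMA_IOC	(1u << 25)	/* "interrupt on completion" (placeholder bit) */

/* split [dma, dma + len) into chunks of at most maxsize bytes */
static size_t fill_chain(struct demo_dma_desc *desc, size_t ndesc,
			 uint32_t dma, size_t len, size_t maxsize)
{
	size_t i;

	for (i = 0; i < ndesc; i++) {
		size_t chunk = len > maxsize ? maxsize : len;

		desc[i].buf = dma;
		desc[i].status = (uint32_t)chunk;	/* bytes covered by this descriptor */
		dma += chunk;
		len -= chunk;

		if (!len) {
			/* close the chain on the final (possibly zero-length) descriptor */
			desc[i].status |= DEMO_DMA_L | DEMO_DMA_IOC;
			return i + 1;
		}
	}
	return i;	/* chain exhausted before the data was */
}
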
899 * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
913 struct dwc2_hsotg *hsotg = hs_ep->parent;
920 index = hs_ep->next_desc;
921 desc = &hs_ep->desc_list[index];
924 if ((desc->status >> DEV_DMA_BUFF_STS_SHIFT) ==
926 dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
931 if (hs_ep->next_desc)
932 hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
934 dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
935 __func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
937 desc->status = 0;
938 desc->status |= (DEV_DMA_BUFF_STS_HBUSY << DEV_DMA_BUFF_STS_SHIFT);
940 desc->buf = dma_buff;
941 desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
944 if (hs_ep->dir_in) {
946 pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
949 desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
951 ((len % hs_ep->ep.maxpacket) ?
953 ((hs_ep->target_frame <<
958 desc->status &= ~DEV_DMA_BUFF_STS_MASK;
959 desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
962 if (hs_ep->dir_in)
966 hs_ep->next_desc++;
967 if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
968 hs_ep->next_desc = 0;
974 * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
982 struct dwc2_hsotg *hsotg = hs_ep->parent;
984 int index = hs_ep->index;
992 if (list_empty(&hs_ep->queue)) {
993 hs_ep->target_frame = TARGET_FRAME_INITIAL;
994 dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
1000 desc = &hs_ep->desc_list[i];
1001 desc->status = 0;
1002 desc->status |= (DEV_DMA_BUFF_STS_HBUSY
1006 hs_ep->next_desc = 0;
1007 list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
1008 dma_addr_t dma_addr = hs_req->req.dma;
1010 if (hs_req->req.num_sgs) {
1011 WARN_ON(hs_req->req.num_sgs > 1);
1012 dma_addr = sg_dma_address(hs_req->req.sg);
1015 hs_req->req.length);
1020 hs_ep->compl_desc = 0;
1021 depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
1022 dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
1025 dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1039 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
1053 struct usb_request *ureq = &hs_req->req;
1054 int index = hs_ep->index;
1055 int dir_in = hs_ep->dir_in;
1066 if (hs_ep->req && !continuing) {
1067 dev_err(hsotg->dev, "%s: active request\n", __func__);
1070 } else if (hs_ep->req != hs_req && continuing) {
1071 dev_err(hsotg->dev,
1082 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x, ep %d, dir %s\n",
1084 hs_ep->dir_in ? "in" : "out");
1090 dev_warn(hsotg->dev, "%s: ep%d is stalled\n", __func__, index);
1094 length = ureq->length - ureq->actual;
1095 dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
1096 ureq->length, ureq->actual);
1104 int round = maxreq % hs_ep->ep.maxpacket;
1106 dev_dbg(hsotg->dev, "%s: length %d, max-req %d, r %d\n",
1111 maxreq -= round;
1117 packets = DIV_ROUND_UP(length, hs_ep->ep.maxpacket);
1122 if (hs_ep->isochronous)
1133 if (dir_in && ureq->zero && !continuing) {
1135 if ((ureq->length >= hs_ep->ep.maxpacket) &&
1136 !(ureq->length % hs_ep->ep.maxpacket))
1137 hs_ep->send_zlp = 1;
1143 dev_dbg(hsotg->dev, "%s: %d@%d/%d, 0x%08x => 0x%08x\n",
1144 __func__, packets, length, ureq->length, epsize, epsize_reg);
1147 hs_ep->req = hs_req;
1151 u32 mps = hs_ep->ep.maxpacket;
1153 /* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
1158 length += (mps - (length % mps));
1162 offset = ureq->actual;
1165 dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
1169 dwc2_writel(hsotg, hs_ep->desc_list_dma, dma_reg);
1171 dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
1172 __func__, (u32)hs_ep->desc_list_dma, dma_reg);
1183 dwc2_writel(hsotg, ureq->dma, dma_reg);
1185 dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
1186 __func__, &ureq->dma, dma_reg);
1190 if (hs_ep->isochronous) {
1192 if (hs_ep->interval == 1) {
1193 if (hs_ep->target_frame & 0x1)
1200 hs_req->req.frame_number = hs_ep->target_frame;
1201 hs_req->req.actual = 0;
1202 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
1209 dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
1212 if (!(index == 0 && hsotg->ep0_state == DWC2_EP0_SETUP))
1215 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
1223 hs_ep->size_loaded = length;
1224 hs_ep->last_load = ureq->actual;
1227 /* set these anyway, we may need them for non-periodic in */
1228 hs_ep->fifo_load = 0;
1240 dev_dbg(hsotg->dev,
1244 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n",
1248 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 1);
1252 * dwc2_hsotg_map_dma - map the DMA memory being used for the request
1269 hs_ep->map_dir = hs_ep->dir_in;
1270 ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
1277 dev_err(hsotg->dev, "%s: failed to map buffer %p, %d bytes\n",
1278 __func__, req->buf, req->length);
1280 return -EIO;
1287 void *req_buf = hs_req->req.buf;
1293 WARN_ON(hs_req->saved_req_buf);
1295 dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__,
1296 hs_ep->ep.name, req_buf, hs_req->req.length);
1298 hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC);
1299 if (!hs_req->req.buf) {
1300 hs_req->req.buf = req_buf;
1301 dev_err(hsotg->dev,
1304 return -ENOMEM;
1308 hs_req->saved_req_buf = req_buf;
1310 if (hs_ep->dir_in)
1311 memcpy(hs_req->req.buf, req_buf, hs_req->req.length);
1321 if (!using_dma(hsotg) || !hs_req->saved_req_buf)
1324 dev_dbg(hsotg->dev, "%s: %s: status=%d actual-length=%d\n", __func__,
1325 hs_ep->ep.name, hs_req->req.status, hs_req->req.actual);
1328 if (!hs_ep->dir_in && !hs_req->req.status)
1329 memcpy(hs_req->saved_req_buf, hs_req->req.buf,
1330 hs_req->req.actual);
1333 kfree(hs_req->req.buf);
1335 hs_req->req.buf = hs_req->saved_req_buf;
1336 hs_req->saved_req_buf = NULL;
1340 * dwc2_gadget_target_frame_elapsed - Checks target frame
1348 struct dwc2_hsotg *hsotg = hs_ep->parent;
1349 u32 target_frame = hs_ep->target_frame;
1350 u32 current_frame = hsotg->frame_number;
1351 bool frame_overrun = hs_ep->frame_overrun;
1354 if (hsotg->gadget.speed != USB_SPEED_HIGH)
1361 ((current_frame - target_frame) < limit / 2))
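
A standalone version of the "has the target (micro)frame already passed?" test described by dwc2_gadget_target_frame_elapsed() above, assuming the same counter limit and the frame_overrun flag maintained by the increment helper earlier in the file.

#include <stdbool.h>
#include <stdint.h>

static bool target_frame_elapsed(uint32_t current_frame, uint32_t target_frame,
				 bool frame_overrun, uint32_t limit)
{
	/* no wrap happened: a simple comparison is enough */
	if (!frame_overrun && current_frame >= target_frame)
		return true;

	/*
	 * After a wrap, "current >= target" only means "elapsed" while the
	 * counter is still within half a period of the wrapped target.
	 */
	if (frame_overrun && current_frame >= target_frame &&
	    (current_frame - target_frame) < limit / 2)
		return true;

	return false;
}
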
1368 * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
1378 switch (hsotg->ep0_state) {
1381 hs_ep->desc_list = hsotg->setup_desc[0];
1382 hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
1386 hs_ep->desc_list = hsotg->ctrl_in_desc;
1387 hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
1390 hs_ep->desc_list = hsotg->ctrl_out_desc;
1391 hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
1394 dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
1395 hsotg->ep0_state);
1396 return -EINVAL;
1407 struct dwc2_hsotg *hs = hs_ep->parent;
1414 dev_dbg(hs->dev, "%s: req %p: %d@%p, noi=%d, zero=%d, snok=%d\n",
1415 ep->name, req, req->length, req->buf, req->no_interrupt,
1416 req->zero, req->short_not_ok);
1418 if (hs->lx_state == DWC2_L1) {
1423 if (hs->lx_state != DWC2_L0) {
1424 dev_dbg(hs->dev, "%s: submit request only in active state\n",
1426 return -EAGAIN;
1430 INIT_LIST_HEAD(&hs_req->queue);
1431 req->actual = 0;
1432 req->status = -EINPROGRESS;
1435 if (hs_ep->isochronous &&
1436 req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
1437 dev_err(hs->dev, "req length > maxpacket*mc\n");
1438 return -EINVAL;
1441 /* In DDMA mode for ISOCs don't queue a request if its length is greater
1444 if (using_desc_dma(hs) && hs_ep->isochronous) {
1446 if (hs_ep->dir_in && req->length > maxsize) {
1447 dev_err(hs->dev, "wrong length %d (maxsize=%d)\n",
1448 req->length, maxsize);
1449 return -EINVAL;
1452 if (!hs_ep->dir_in && req->length > hs_ep->ep.maxpacket) {
1453 dev_err(hs->dev, "ISOC OUT: wrong length %d (mps=%d)\n",
1454 req->length, hs_ep->ep.maxpacket);
1455 return -EINVAL;
1470 if (using_desc_dma(hs) && !hs_ep->index) {
1476 first = list_empty(&hs_ep->queue);
1477 list_add_tail(&hs_req->queue, &hs_ep->queue);
1480 * Handle DDMA isochronous transfers separately - just add new entry
1485 if (using_desc_dma(hs) && hs_ep->isochronous) {
1486 if (hs_ep->target_frame != TARGET_FRAME_INITIAL) {
1487 dma_addr_t dma_addr = hs_req->req.dma;
1489 if (hs_req->req.num_sgs) {
1490 WARN_ON(hs_req->req.num_sgs > 1);
1491 dma_addr = sg_dma_address(hs_req->req.sg);
1494 hs_req->req.length);
1500 if (!hs_ep->index && !req->length && !hs_ep->dir_in &&
1501 hs->ep0_state == DWC2_EP0_DATA_OUT)
1502 hs_ep->dir_in = 1;
1505 if (!hs_ep->isochronous) {
1511 hs->frame_number = dwc2_hsotg_read_frameno(hs);
1517 hs->frame_number = dwc2_hsotg_read_frameno(hs);
1520 if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
1530 struct dwc2_hsotg *hs = hs_ep->parent;
1534 spin_lock_irqsave(&hs->lock, flags);
1536 spin_unlock_irqrestore(&hs->lock, flags);
1550 * dwc2_hsotg_complete_oursetup - setup completion callback
1561 struct dwc2_hsotg *hsotg = hs_ep->parent;
1563 dev_dbg(hsotg->dev, "%s: ep %p, req %p\n", __func__, ep, req);
1569 * ep_from_windex - convert control wIndex value to endpoint
1585 if (idx > hsotg->num_of_eps)
1592 * dwc2_hsotg_set_test_mode - Enable USB test modes
1594 * @testmode: requested USB test mode
1595 * Enable the USB test mode requested by the host.
1611 return -EINVAL;
1618 * dwc2_hsotg_send_reply - send reply to control request
1635 dev_dbg(hsotg->dev, "%s: buff %p, len %d\n", __func__, buff, length);
1637 req = dwc2_hsotg_ep_alloc_request(&ep->ep, GFP_ATOMIC);
1638 hsotg->ep0_reply = req;
1640 dev_warn(hsotg->dev, "%s: cannot alloc req\n", __func__);
1641 return -ENOMEM;
1644 req->buf = hsotg->ep0_buff;
1645 req->length = length;
1650 req->zero = 0;
1651 req->complete = dwc2_hsotg_complete_oursetup;
1654 memcpy(req->buf, buff, length);
1656 ret = dwc2_hsotg_ep_queue(&ep->ep, req, GFP_ATOMIC);
1658 dev_warn(hsotg->dev, "%s: cannot queue req\n", __func__);
1666 * dwc2_hsotg_process_req_status - process request GET_STATUS
1673 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1679 dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
1681 if (!ep0->dir_in) {
1682 dev_warn(hsotg->dev, "%s: direction out?\n", __func__);
1683 return -EINVAL;
1686 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1688 status = hsotg->gadget.is_selfpowered <<
1690 status |= hsotg->remote_wakeup_allowed <<
1701 ep = ep_from_windex(hsotg, le16_to_cpu(ctrl->wIndex));
1703 return -ENOENT;
1705 reply = cpu_to_le16(ep->halted ? 1 : 0);
1712 if (le16_to_cpu(ctrl->wLength) != 2)
1713 return -EINVAL;
1717 dev_err(hsotg->dev, "%s: failed to send reply\n", __func__);
1727 * get_ep_head - return the first request on the endpoint
1734 return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
1739 * dwc2_gadget_start_next_request - Starts next request from ep queue
1742 * If the queue is empty and the EP is ISOC-OUT, unmasks OUTTKNEPDIS, which is masked
1748 struct dwc2_hsotg *hsotg = hs_ep->parent;
1749 int dir_in = hs_ep->dir_in;
1752 if (!list_empty(&hs_ep->queue)) {
1757 if (!hs_ep->isochronous)
1761 dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
1764 dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
1770 * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
1777 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1779 bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
1787 dev_dbg(hsotg->dev, "%s: %s_FEATURE\n",
1790 wValue = le16_to_cpu(ctrl->wValue);
1791 wIndex = le16_to_cpu(ctrl->wIndex);
1792 recip = ctrl->bRequestType & USB_RECIP_MASK;
1799 hsotg->remote_wakeup_allowed = 1;
1801 hsotg->remote_wakeup_allowed = 0;
1806 return -EINVAL;
1808 return -EINVAL;
1810 hsotg->test_mode = wIndex >> 8;
1813 return -ENOENT;
1818 dev_err(hsotg->dev,
1827 dev_dbg(hsotg->dev, "%s: no endpoint for 0x%04x\n",
1829 return -ENOENT;
1834 halted = ep->halted;
1836 if (!ep->wedged)
1837 dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
1841 dev_err(hsotg->dev,
1856 if (ep->req) {
1857 hs_req = ep->req;
1858 ep->req = NULL;
1859 list_del_init(&hs_req->queue);
1860 if (hs_req->req.complete) {
1861 spin_unlock(&hsotg->lock);
1863 &ep->ep, &hs_req->req);
1864 spin_lock(&hsotg->lock);
1869 if (!ep->req)
1876 return -ENOENT;
1880 return -ENOENT;
1888 * dwc2_hsotg_stall_ep0 - stall ep0
1895 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1899 dev_dbg(hsotg->dev, "ep0 stall (dir=%d)\n", ep0->dir_in);
1900 reg = (ep0->dir_in) ? DIEPCTL0 : DOEPCTL0;
1912 dev_dbg(hsotg->dev,
1924 * dwc2_hsotg_process_control - process a control request
1935 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
1939 dev_dbg(hsotg->dev,
1941 ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
1942 ctrl->wIndex, ctrl->wLength);
1944 if (ctrl->wLength == 0) {
1945 ep0->dir_in = 1;
1946 hsotg->ep0_state = DWC2_EP0_STATUS_IN;
1947 } else if (ctrl->bRequestType & USB_DIR_IN) {
1948 ep0->dir_in = 1;
1949 hsotg->ep0_state = DWC2_EP0_DATA_IN;
1951 ep0->dir_in = 0;
1952 hsotg->ep0_state = DWC2_EP0_DATA_OUT;
1955 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
1956 switch (ctrl->bRequest) {
1958 hsotg->connected = 1;
1961 dcfg |= (le16_to_cpu(ctrl->wValue) <<
1965 dev_info(hsotg->dev, "new address %d\n", ctrl->wValue);
1983 if (ret == 0 && hsotg->driver) {
1984 spin_unlock(&hsotg->lock);
1985 ret = hsotg->driver->setup(&hsotg->gadget, ctrl);
1986 spin_lock(&hsotg->lock);
1988 dev_dbg(hsotg->dev, "driver->setup() ret %d\n", ret);
1991 hsotg->delayed_status = false;
1993 hsotg->delayed_status = true;
2005 * dwc2_hsotg_complete_setup - completion of a setup transfer
2016 struct dwc2_hsotg *hsotg = hs_ep->parent;
2018 if (req->status < 0) {
2019 dev_dbg(hsotg->dev, "%s: failed %d\n", __func__, req->status);
2023 spin_lock(&hsotg->lock);
2024 if (req->actual == 0)
2027 dwc2_hsotg_process_control(hsotg, req->buf);
2028 spin_unlock(&hsotg->lock);
2032 * dwc2_hsotg_enqueue_setup - start a request for EP0 packets
2040 struct usb_request *req = hsotg->ctrl_req;
2044 dev_dbg(hsotg->dev, "%s: queueing setup request\n", __func__);
2046 req->zero = 0;
2047 req->length = 8;
2048 req->buf = hsotg->ctrl_buff;
2049 req->complete = dwc2_hsotg_complete_setup;
2051 if (!list_empty(&hs_req->queue)) {
2052 dev_dbg(hsotg->dev, "%s already queued???\n", __func__);
2056 hsotg->eps_out[0]->dir_in = 0;
2057 hsotg->eps_out[0]->send_zlp = 0;
2058 hsotg->ep0_state = DWC2_EP0_SETUP;
2060 ret = dwc2_hsotg_ep_queue(&hsotg->eps_out[0]->ep, req, GFP_ATOMIC);
2062 dev_err(hsotg->dev, "%s: failed queue (%d)\n", __func__, ret);
2074 u8 index = hs_ep->index;
2075 u32 epctl_reg = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
2076 u32 epsiz_reg = hs_ep->dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
2078 if (hs_ep->dir_in)
2079 dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
2082 dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
2086 dma_addr_t dma = hs_ep->desc_list_dma;
2106 * dwc2_hsotg_complete_request - complete a request given to us
2124 dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
2128 dev_dbg(hsotg->dev, "complete: ep %p %s, req %p, %d => %p\n",
2129 hs_ep, hs_ep->ep.name, hs_req, result, hs_req->req.complete);
2136 if (hs_req->req.status == -EINPROGRESS)
2137 hs_req->req.status = result;
2144 hs_ep->req = NULL;
2145 list_del_init(&hs_req->queue);
2152 if (hs_req->req.complete) {
2153 spin_unlock(&hsotg->lock);
2154 usb_gadget_giveback_request(&hs_ep->ep, &hs_req->req);
2155 spin_lock(&hsotg->lock);
2159 if (using_desc_dma(hsotg) && hs_ep->isochronous)
2168 if (!hs_ep->req && result >= 0)
2173 * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
2183 struct dwc2_hsotg *hsotg = hs_ep->parent;
2189 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
2197 dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
2200 ureq = &hs_req->req;
2205 mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
2207 ureq->actual = ureq->length - ((desc_sts & mask) >>
2213 if (!hs_ep->dir_in && ureq->length & 0x3)
2214 ureq->actual += 4 - (ureq->length & 0x3);
2217 ureq->frame_number =
2224 hs_ep->compl_desc++;
2225 if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
2226 hs_ep->compl_desc = 0;
2227 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
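
A sketch of recovering the completed byte count from an isochronous descriptor status word, following dwc2_gadget_complete_isoc_request_ddma() above; the remaining-bytes mask and the 32-bit OUT padding correction are assumptions modelled on the fragments.

#include <stdbool.h>
#include <stdint.h>

static uint32_t isoc_desc_actual(uint32_t desc_status, uint32_t req_length,
				 uint32_t nbytes_mask, bool dir_in)
{
	/* the status field keeps the bytes the core did NOT transfer */
	uint32_t remaining = desc_status & nbytes_mask;
	uint32_t actual = req_length - remaining;

	/* OUT transfers are rounded up to a 32-bit boundary by the core */
	if (!dir_in && (req_length & 0x3))
		actual += 4 - (req_length & 0x3);

	return actual;
}
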
2232 * dwc2_gadget_handle_isoc_bna - handle BNA interrupt for ISOC.
2242 struct dwc2_hsotg *hsotg = hs_ep->parent;
2244 if (!hs_ep->dir_in)
2248 hs_ep->target_frame = TARGET_FRAME_INITIAL;
2249 hs_ep->next_desc = 0;
2250 hs_ep->compl_desc = 0;
2254 * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
2265 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[ep_idx];
2266 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2275 dev_dbg(hsotg->dev,
2287 read_ptr = hs_req->req.actual;
2288 max_req = hs_req->req.length - read_ptr;
2290 dev_dbg(hsotg->dev, "%s: read %d/%d, done %d/%d\n",
2291 __func__, to_read, max_req, read_ptr, hs_req->req.length);
2303 hs_ep->total_data += to_read;
2304 hs_req->req.actual += to_read;
2308 * note, we might over-write the buffer end by 3 bytes depending on
2312 hs_req->req.buf + read_ptr, to_read);
2316 * dwc2_hsotg_ep0_zlp - send/receive zero-length packet on control endpoint
2320 * Generate a zero-length IN packet request for terminating a SETUP
2330 hsotg->eps_out[0]->dir_in = dir_in;
2331 hsotg->ep0_state = dir_in ? DWC2_EP0_STATUS_IN : DWC2_EP0_STATUS_OUT;
2333 dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
2337 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
2338 * @hs_ep: The endpoint on which the transfer went
2345 const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc;
2346 struct dwc2_hsotg *hsotg = hs_ep->parent;
2349 struct dwc2_dma_desc *desc = hs_ep->desc_list;
2352 u32 mps = hs_ep->ep.maxpacket;
2353 int dir_in = hs_ep->dir_in;
2356 return -EINVAL;
2359 if (hs_ep->index)
2361 bytes_rem_correction = 4 - (mps % 4);
2363 for (i = 0; i < hs_ep->desc_count; ++i) {
2364 status = desc->status;
2366 bytes_rem -= bytes_rem_correction;
2369 dev_err(hsotg->dev, "descriptor %d closed with %x\n",
2382 * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
2393 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum];
2394 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2395 struct usb_request *req = &hs_req->req;
2400 dev_dbg(hsotg->dev, "%s: no request active\n", __func__);
2404 if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_OUT) {
2405 dev_dbg(hsotg->dev, "zlp packet received\n");
2426 size_done = hs_ep->size_loaded - size_left;
2427 size_done += hs_ep->last_load;
2429 req->actual = size_done;
2433 if (req->actual < req->length && size_left == 0) {
2438 if (req->actual < req->length && req->short_not_ok) {
2439 dev_dbg(hsotg->dev, "%s: got %d/%d (short not ok) => error\n",
2440 __func__, req->actual, req->length);
2443 * todo - what should we return here? there's no one else
2450 hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
2452 if (!hsotg->delayed_status)
2457 if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2458 req->frame_number = hs_ep->target_frame;
2466 * dwc2_hsotg_handle_rx - RX FIFO has data
2494 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n",
2499 dev_dbg(hsotg->dev, "GLOBALOUTNAK\n");
2503 dev_dbg(hsotg->dev, "OutDone (Frame=0x%08x)\n",
2511 dev_dbg(hsotg->dev,
2520 if (hsotg->ep0_state == DWC2_EP0_SETUP)
2529 dev_dbg(hsotg->dev,
2534 WARN_ON(hsotg->ep0_state != DWC2_EP0_SETUP);
2540 dev_warn(hsotg->dev, "%s: unknown status %08x\n",
2549 * dwc2_hsotg_ep0_mps - turn max packet size into register setting
2567 return (u32)-1;
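
A sketch of the control-endpoint MPS encoding implied by dwc2_hsotg_ep0_mps() above; the 64 -> 0 through 8 -> 3 mapping is the usual DWC2 D0EPCTL MPS field encoding and is stated here as an assumption.

#include <stdint.h>

static uint32_t ep0_mps_field(unsigned int mps)
{
	switch (mps) {
	case 64: return 0;		/* D0EPCTL_MPS_64 */
	case 32: return 1;		/* D0EPCTL_MPS_32 */
	case 16: return 2;		/* D0EPCTL_MPS_16 */
	case 8:  return 3;		/* D0EPCTL_MPS_8 */
	default: return (uint32_t)-1;	/* invalid for a control endpoint */
	}
}
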
2571 * dwc2_hsotg_set_ep_maxpacket - set endpoint's max-packet field
2599 hs_ep->ep.maxpacket = mps_bytes;
2600 hs_ep->mc = 1;
2604 hs_ep->mc = mc;
2607 hs_ep->ep.maxpacket = mps;
2625 dev_err(hsotg->dev, "ep%d: bad mps of %d\n", ep, mps);
2629 * dwc2_hsotg_txfifo_flush - flush Tx FIFO
2640 dev_warn(hsotg->dev, "%s: timeout flushing fifo GRSTCTL_TXFFLSH\n",
2645 * dwc2_hsotg_trytx - check to see if anything needs transmitting
2655 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2657 if (!hs_ep->dir_in || !hs_req) {
2659 * if request is not enqueued, we disable interrupts
2662 if (hs_ep->index != 0)
2663 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index,
2664 hs_ep->dir_in, 0);
2668 if (hs_req->req.actual < hs_req->req.length) {
2669 dev_dbg(hsotg->dev, "trying to write more for ep%d\n",
2670 hs_ep->index);
2678 * dwc2_hsotg_complete_in - complete IN transfer
2688 struct dwc2_hsotg_req *hs_req = hs_ep->req;
2689 u32 epsize = dwc2_readl(hsotg, DIEPTSIZ(hs_ep->index));
2693 dev_dbg(hsotg->dev, "XferCompl but no req\n");
2698 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
2699 dev_dbg(hsotg->dev, "zlp packet sent\n");
2705 hs_ep->dir_in = 0;
2708 if (hsotg->test_mode) {
2711 ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode);
2713 dev_dbg(hsotg->dev, "Invalid Test #%d\n",
2714 hsotg->test_mode);
2735 dev_err(hsotg->dev, "error parsing DDMA results %d\n",
2741 size_done = hs_ep->size_loaded - size_left;
2742 size_done += hs_ep->last_load;
2744 if (hs_req->req.actual != size_done)
2745 dev_dbg(hsotg->dev, "%s: adjusting size done %d => %d\n",
2746 __func__, hs_req->req.actual, size_done);
2748 hs_req->req.actual = size_done;
2749 dev_dbg(hsotg->dev, "req->length:%d req->actual:%d req->zero:%d\n",
2750 hs_req->req.length, hs_req->req.actual, hs_req->req.zero);
2752 if (!size_left && hs_req->req.actual < hs_req->req.length) {
2753 dev_dbg(hsotg->dev, "%s trying more for req...\n", __func__);
2759 if (hs_ep->send_zlp) {
2760 hs_ep->send_zlp = 0;
2768 if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) {
2775 if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
2776 hs_req->req.frame_number = hs_ep->target_frame;
2784 * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
2787 * @dir_in: Endpoint direction 1-in 0-out.
2812 * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
2821 * For ISOC-OUT endpoints completes expired requests. If there is remaining
2826 struct dwc2_hsotg *hsotg = hs_ep->parent;
2828 unsigned char idx = hs_ep->index;
2829 int dir_in = hs_ep->dir_in;
2833 dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
2838 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
2854 if (!hs_ep->isochronous)
2857 if (list_empty(&hs_ep->queue)) {
2858 dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
2866 hs_req->req.frame_number = hs_ep->target_frame;
2867 hs_req->req.actual = 0;
2869 -ENODATA);
2873 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2878 * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
2881 * This is the starting point for ISOC-OUT transfers; synchronization is done with
2885 * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon
2890 struct dwc2_hsotg *hsotg = ep->parent;
2892 int dir_in = ep->dir_in;
2894 if (dir_in || !ep->isochronous)
2898 if (ep->target_frame == TARGET_FRAME_INITIAL) {
2900 ep->target_frame = hsotg->frame_number;
2906 if (ep->target_frame == TARGET_FRAME_INITIAL) {
2909 ep->target_frame = hsotg->frame_number;
2910 if (ep->interval > 1) {
2911 ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
2912 if (ep->target_frame & 0x1)
2917 dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
2924 hs_req->req.frame_number = ep->target_frame;
2925 hs_req->req.actual = 0;
2926 dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2931 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2934 if (!ep->req)
2943 * dwc2_gadget_handle_nak - handle NAK interrupt
2946 * This is the starting point for ISOC-IN transfers; synchronization is done with
2951 * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was
2958 struct dwc2_hsotg *hsotg = hs_ep->parent;
2960 int dir_in = hs_ep->dir_in;
2963 if (!dir_in || !hs_ep->isochronous)
2966 if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
2969 hs_ep->target_frame = hsotg->frame_number;
2972 /* In service interval mode target_frame must
2975 if (hsotg->params.service_interval) {
2979 hs_ep->target_frame &= ~hs_ep->interval + 1;
2992 hs_ep->target_frame = hsotg->frame_number;
2993 if (hs_ep->interval > 1) {
2995 DIEPCTL(hs_ep->index));
2996 if (hs_ep->target_frame & 0x1)
3001 dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
3008 ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
3012 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
3017 hs_req->req.frame_number = hs_ep->target_frame;
3018 hs_req->req.actual = 0;
3019 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
3024 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
3027 if (!hs_ep->req)
3032 * dwc2_hsotg_epint - handle an in/out endpoint interrupt
3054 dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n",
3059 dev_dbg(hsotg->dev, "%s: ep%d(%s) DxEPINT=0x%08x\n",
3072 if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
3073 hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
3077 dev_dbg(hsotg->dev,
3083 if (using_desc_dma(hsotg) && hs_ep->isochronous) {
3089 * if operating slave mode
3091 if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
3094 if (idx == 0 && !hs_ep->req)
3101 if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
3116 dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
3119 dev_dbg(hsotg->dev, "%s: Setup/Timeout\n", __func__);
3124 * setup packet. In non-DMA mode we'd get this
3137 dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
3140 if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
3143 if (!hsotg->delayed_status)
3161 dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
3164 dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
3165 if (hs_ep->isochronous)
3169 if (dir_in && !hs_ep->isochronous) {
3172 dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
3178 dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
3183 if (hsotg->dedicated_fifos &&
3185 dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
3194 * dwc2_hsotg_irq_enumdone - Handle EnumDone interrupt (enumeration done)
3211 dev_dbg(hsotg->dev, "EnumDone (DSTS=0x%08x)\n", dsts);
3223 hsotg->gadget.speed = USB_SPEED_FULL;
3229 hsotg->gadget.speed = USB_SPEED_HIGH;
3235 hsotg->gadget.speed = USB_SPEED_LOW;
3245 dev_info(hsotg->dev, "new device is %s\n",
3246 usb_speed_string(hsotg->gadget.speed));
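
A sketch of the speed-to-default-packet-size mapping applied after EnumDone above; the 64/1023, 64/1024 and 8/8 values are the standard USB limits for full, high and low speed and are assumptions here, not values copied from the driver.

enum demo_speed { DEMO_LOW, DEMO_FULL, DEMO_HIGH };

static void enum_speed_to_mps(enum demo_speed speed,
			      unsigned int *ep0_mps, unsigned int *ep_mps)
{
	switch (speed) {
	case DEMO_FULL:
		*ep0_mps = 64;
		*ep_mps = 1023;		/* largest full-speed isochronous payload */
		break;
	case DEMO_HIGH:
		*ep0_mps = 64;
		*ep_mps = 1024;
		break;
	case DEMO_LOW:
		*ep0_mps = 8;
		*ep_mps = 8;
		break;
	}
}
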
3258 for (i = 1; i < hsotg->num_of_eps; i++) {
3259 if (hsotg->eps_in[i])
3262 if (hsotg->eps_out[i])
3272 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3278 * kill_all_requests - remove all requests from the endpoint's queue
3292 ep->req = NULL;
3294 while (!list_empty(&ep->queue)) {
3300 if (!hsotg->dedicated_fifos)
3302 size = (dwc2_readl(hsotg, DTXFSTS(ep->fifo_index)) & 0xffff) * 4;
3303 if (size < ep->fifo_size)
3304 dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
3308 * dwc2_hsotg_disconnect - disconnect service
3319 if (!hsotg->connected)
3322 hsotg->connected = 0;
3323 hsotg->test_mode = 0;
3326 for (ep = 0; ep < hsotg->num_of_eps; ep++) {
3327 if (hsotg->eps_in[ep])
3328 kill_all_requests(hsotg, hsotg->eps_in[ep],
3329 -ESHUTDOWN);
3330 if (hsotg->eps_out[ep])
3331 kill_all_requests(hsotg, hsotg->eps_out[ep],
3332 -ESHUTDOWN);
3336 hsotg->lx_state = DWC2_L3;
3338 usb_gadget_set_state(&hsotg->gadget, USB_STATE_NOTATTACHED);
3342 * dwc2_hsotg_irq_fifoempty - TX FIFO empty interrupt handler
3352 for (epno = 0; epno < hsotg->num_of_eps; epno++) {
3358 if (!ep->dir_in)
3361 if ((periodic && !ep->periodic) ||
3362 (!periodic && ep->periodic))
3378 * dwc2_hsotg_core_init_disconnected - issue softreset to the core
3394 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
3401 for (ep = 1; ep < hsotg->num_of_eps; ep++) {
3402 if (hsotg->eps_in[ep])
3403 dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
3404 if (hsotg->eps_out[ep])
3405 dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
3432 switch (hsotg->params.speed) {
3437 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
3446 if (hsotg->params.ipg_isoc_en)
3466 if (!hsotg->params.external_id_pin_ctl)
3473 hsotg->params.ahbcfg,
3476 /* Set DDMA mode support in the core if needed */
3481 dwc2_writel(hsotg, ((hsotg->dedicated_fifos) ?
3488 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
3493 dwc2_writel(hsotg, ((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
3500 * don't need XferCompl, we get that from RXFIFO in slave mode. In
3501 * DMA mode we may need this and StsPhseRcvd.
3515 /* Enable Service Interval mode if supported */
3516 if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3521 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3529 * Enable the RXFIFO when in slave mode, as this is how we collect
3530 * the data. In DMA mode, we get events from the FIFO but also
3546 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", dwc2_readl(hsotg, DCTL));
3557 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3563 dwc2_writel(hsotg, dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
3576 if (using_desc_dma(hsotg) && hsotg->params.service_interval)
3579 /* must be at least 3 ms to allow bus to see disconnect */
3582 hsotg->lx_state = DWC2_L0;
3586 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
3593 /* set the soft-disconnect bit */
3599 /* remove the soft-disconnect and let's go */
3600 if (!hsotg->role_sw || (dwc2_readl(hsotg, GOTGCTL) & GOTGCTL_BSESVLD))
3605 * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
3610 * - Corrupted IN Token for ISOC EP.
3611 * - Packet not complete in FIFO.
3614 * - Determine the EP
3615 * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
3624 dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
3628 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3629 hs_ep = hsotg->eps_in[idx];
3631 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3648 * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
3653 * - Corrupted OUT Token for ISOC EP.
3654 * - Packet not complete in FIFO.
3657 * - Determine the EP
3658 * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
3669 dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
3674 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3675 hs_ep = hsotg->eps_out[idx];
3677 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3701 * dwc2_hsotg_irq - handle device interrupt
3715 spin_lock(&hsotg->lock);
3720 dev_dbg(hsotg->dev, "%s: %08x %08x (%08x) retry %d\n",
3726 dev_dbg(hsotg->dev, "%s: USBRstDet\n", __func__);
3731 if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
3734 /* Exit gadget mode clock gating. */
3735 if (hsotg->params.power_down ==
3736 DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
3737 !hsotg->params.no_clock_gating)
3740 hsotg->lx_state = DWC2_L0;
3745 u32 connected = hsotg->connected;
3747 dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
3748 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
3779 dev_dbg(hsotg->dev, "%s: daint=%08x\n", __func__, daint);
3781 for (ep = 0; ep < hsotg->num_of_eps && daint_out;
3787 for (ep = 0; ep < hsotg->num_of_eps && daint_in;
3797 dev_dbg(hsotg->dev, "NPTxFEmp\n");
3800 * Disable the interrupt to stop it happening again
3802 * it needs re-enabling
3810 dev_dbg(hsotg->dev, "PTxFEmp\n");
3820 * note, since GINTSTS_RxFLvl doubles as FIFO-not-empty,
3829 dev_dbg(hsotg->dev, "GINTSTS_ErlySusp\n");
3834 * these next two seem to crop up occasionally causing the core
3853 dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
3854 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3855 hs_ep = hsotg->eps_out[idx];
3863 if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
3870 // Non-ISOC EPs
3871 if (hs_ep->halted) {
3884 dev_info(hsotg->dev, "GINNakEff triggered\n");
3902 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
3906 if (hsotg->params.service_interval)
3909 spin_unlock(&hsotg->lock);
3920 epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
3921 DOEPCTL(hs_ep->index);
3922 epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
3923 DOEPINT(hs_ep->index);
3925 dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
3926 hs_ep->name);
3928 if (hs_ep->dir_in) {
3929 if (hsotg->dedicated_fifos || hs_ep->periodic) {
3934 dev_warn(hsotg->dev,
3942 dev_warn(hsotg->dev,
3957 dev_warn(hsotg->dev, "%s: timeout GINTSTS.RXFLVL\n",
3971 dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
3975 /* Disable ep */
3980 dev_warn(hsotg->dev,
3986 if (hs_ep->dir_in) {
3989 if (hsotg->dedicated_fifos || hs_ep->periodic)
3990 fifo_index = hs_ep->fifo_index;
3998 if (!hsotg->dedicated_fifos && !hs_ep->periodic)
4008 * dwc2_hsotg_ep_enable - enable the given endpoint
4018 struct dwc2_hsotg *hsotg = hs_ep->parent;
4020 unsigned int index = hs_ep->index;
4032 dev_dbg(hsotg->dev,
4034 __func__, ep->name, desc->bEndpointAddress, desc->bmAttributes,
4035 desc->wMaxPacketSize, desc->bInterval);
4039 dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
4040 return -EINVAL;
4043 dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
4044 if (dir_in != hs_ep->dir_in) {
4045 dev_err(hsotg->dev, "%s: direction mismatch!\n", __func__);
4046 return -EINVAL;
4049 ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
4055 dir_in && desc->bInterval > 10) {
4056 dev_err(hsotg->dev,
4058 return -EINVAL;
4064 dev_err(hsotg->dev,
4066 return -EINVAL;
4074 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
4082 /* Allocate DMA descriptor chain for non-ctrl endpoints */
4083 if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
4084 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
4086 &hs_ep->desc_list_dma, GFP_ATOMIC);
4087 if (!hs_ep->desc_list) {
4088 ret = -ENOMEM;
4093 spin_lock_irqsave(&hsotg->lock, flags);
4105 dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
4107 /* default, set to non-periodic */
4108 hs_ep->isochronous = 0;
4109 hs_ep->periodic = 0;
4110 hs_ep->halted = 0;
4111 hs_ep->wedged = 0;
4112 hs_ep->interval = desc->bInterval;
4118 hs_ep->isochronous = 1;
4119 hs_ep->interval = 1 << (desc->bInterval - 1);
4120 hs_ep->target_frame = TARGET_FRAME_INITIAL;
4121 hs_ep->next_desc = 0;
4122 hs_ep->compl_desc = 0;
4124 hs_ep->periodic = 1;
4142 hs_ep->periodic = 1;
4144 if (hsotg->gadget.speed == USB_SPEED_HIGH)
4145 hs_ep->interval = 1 << (desc->bInterval - 1);
4157 * a unique tx-fifo even if it is non-periodic.
4159 if (dir_in && hsotg->dedicated_fifos) {
4164 size = hs_ep->ep.maxpacket * hs_ep->mc;
4166 if (hsotg->fifo_map & (1 << i))
4179 dev_err(hsotg->dev,
4181 ret = -ENOMEM;
4185 hsotg->fifo_map |= 1 << fifo_index;
4187 hs_ep->fifo_index = fifo_index;
4188 hs_ep->fifo_size = fifo_size;
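
A standalone sketch of the "smallest free TX FIFO that still fits" search implied by the dedicated-FIFO block in dwc2_hsotg_ep_enable() above: fifo_sizes[] stands in for the per-FIFO depth read from the DPTXFSIZN registers and fifo_map for the driver's bitmap of FIFOs already claimed.

#include <stdint.h>

static int pick_tx_fifo(const uint32_t *fifo_sizes, unsigned int nfifos,
			uint32_t fifo_map, uint32_t needed)
{
	unsigned int i, best = 0;
	uint32_t best_size = UINT32_MAX;

	for (i = 1; i < nfifos; i++) {
		if (fifo_map & (1u << i))
			continue;			/* already assigned to another EP */
		if (fifo_sizes[i] < needed)
			continue;			/* too small for maxpacket * mc */
		if (fifo_sizes[i] < best_size) {	/* smallest acceptable FIFO wins */
			best_size = fifo_sizes[i];
			best = i;
		}
	}
	return best ? (int)best : -1;			/* -1: nothing suitable is free */
}
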
4192 if (index && !hs_ep->isochronous)
4195 /* WA for Full speed ISOC IN in DDMA mode.
4201 if (hsotg->gadget.speed == USB_SPEED_FULL &&
4202 hs_ep->isochronous && dir_in) {
4216 dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
4220 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x\n",
4227 spin_unlock_irqrestore(&hsotg->lock, flags);
4230 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
4231 dmam_free_coherent(hsotg->dev, desc_num *
4233 hs_ep->desc_list, hs_ep->desc_list_dma);
4234 hs_ep->desc_list = NULL;
4241 * dwc2_hsotg_ep_disable - disable given endpoint
4242 * @ep: The endpoint to disable.
4247 struct dwc2_hsotg *hsotg = hs_ep->parent;
4248 int dir_in = hs_ep->dir_in;
4249 int index = hs_ep->index;
4253 dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
4255 if (ep == &hsotg->eps_out[0]->ep) {
4256 dev_err(hsotg->dev, "%s: called for ep0\n", __func__);
4257 return -EINVAL;
4260 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4261 dev_err(hsotg->dev, "%s: called in host mode?\n", __func__);
4262 return -EINVAL;
4276 dev_dbg(hsotg->dev, "%s: DxEPCTL=0x%08x\n", __func__, ctrl);
4279 /* disable endpoint interrupts */
4280 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, hs_ep->dir_in, 0);
4283 kill_all_requests(hsotg, hs_ep, -ESHUTDOWN);
4285 hsotg->fifo_map &= ~(1 << hs_ep->fifo_index);
4286 hs_ep->fifo_index = 0;
4287 hs_ep->fifo_size = 0;
4295 struct dwc2_hsotg *hsotg = hs_ep->parent;
4299 spin_lock_irqsave(&hsotg->lock, flags);
4301 spin_unlock_irqrestore(&hsotg->lock, flags);
4306 * on_list - check request is on the given endpoint
4314 list_for_each_entry_safe(req, treq, &ep->queue, queue) {
4323 * dwc2_hsotg_ep_dequeue - dequeue given endpoint
4331 struct dwc2_hsotg *hs = hs_ep->parent;
4334 dev_dbg(hs->dev, "ep_dequeue(%p,%p)\n", ep, req);
4336 spin_lock_irqsave(&hs->lock, flags);
4339 spin_unlock_irqrestore(&hs->lock, flags);
4340 return -EINVAL;
4344 if (req == &hs_ep->req->req)
4347 dwc2_hsotg_complete_request(hs, hs_ep, hs_req, -ECONNRESET);
4348 spin_unlock_irqrestore(&hs->lock, flags);
4354 * dwc2_gadget_ep_set_wedge - set wedge on a given endpoint
4361 struct dwc2_hsotg *hs = hs_ep->parent;
4366 spin_lock_irqsave(&hs->lock, flags);
4367 hs_ep->wedged = 1;
4369 spin_unlock_irqrestore(&hs->lock, flags);
4375 * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
4378 * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
4387 struct dwc2_hsotg *hs = hs_ep->parent;
4388 int index = hs_ep->index;
4393 dev_info(hs->dev, "%s(ep %p %s, %d)\n", __func__, ep, ep->name, value);
4399 dev_warn(hs->dev,
4404 if (hs_ep->isochronous) {
4405 dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
4406 return -EINVAL;
4409 if (!now && value && !list_empty(&hs_ep->queue)) {
4410 dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
4411 ep->name);
4412 return -EAGAIN;
4415 if (hs_ep->dir_in) {
4425 hs_ep->wedged = 0;
4445 hs_ep->wedged = 0;
4454 hs_ep->halted = value;
4459 * dwc2_hsotg_ep_sethalt_lock - set halt on a given endpoint with lock held
4466 struct dwc2_hsotg *hs = hs_ep->parent;
4470 spin_lock_irqsave(&hs->lock, flags);
4472 spin_unlock_irqrestore(&hs->lock, flags);
4479 .disable = dwc2_hsotg_ep_disable_lock,
4490 * dwc2_hsotg_init - initialize the usb core
4512 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
4523 * dwc2_hsotg_udc_start - prepare the udc for work
4539 return -ENODEV;
4543 dev_err(hsotg->dev, "%s: no driver\n", __func__);
4544 return -EINVAL;
4547 if (driver->max_speed < USB_SPEED_FULL)
4548 dev_err(hsotg->dev, "%s: bad speed\n", __func__);
4550 if (!driver->setup) {
4551 dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
4552 return -EINVAL;
4555 WARN_ON(hsotg->driver);
4557 hsotg->driver = driver;
4558 hsotg->gadget.dev.of_node = hsotg->dev->of_node;
4559 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4561 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
4567 if (!IS_ERR_OR_NULL(hsotg->uphy))
4568 otg_set_peripheral(hsotg->uphy->otg, &hsotg->gadget);
4570 spin_lock_irqsave(&hsotg->lock, flags);
4576 hsotg->enabled = 0;
4577 spin_unlock_irqrestore(&hsotg->lock, flags);
4579 gadget->sg_supported = using_desc_dma(hsotg);
4580 dev_info(hsotg->dev, "bound driver %s\n", driver->driver.name);
4585 hsotg->driver = NULL;
4590 * dwc2_hsotg_udc_stop - stop the udc
4602 return -ENODEV;
4605 for (ep = 1; ep < hsotg->num_of_eps; ep++) {
4606 if (hsotg->eps_in[ep])
4607 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
4608 if (hsotg->eps_out[ep])
4609 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
4612 spin_lock_irqsave(&hsotg->lock, flags);
4614 hsotg->driver = NULL;
4615 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4616 hsotg->enabled = 0;
4618 spin_unlock_irqrestore(&hsotg->lock, flags);
4620 if (!IS_ERR_OR_NULL(hsotg->uphy))
4621 otg_set_peripheral(hsotg->uphy->otg, NULL);
4623 if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
4630 * dwc2_hsotg_gadget_getframe - read the frame number
4641 * dwc2_hsotg_set_selfpowered - set if device is self/bus powered
4643 * @is_selfpowered: Whether the device is self-powered
4653 spin_lock_irqsave(&hsotg->lock, flags);
4654 gadget->is_selfpowered = !!is_selfpowered;
4655 spin_unlock_irqrestore(&hsotg->lock, flags);
4661 * dwc2_hsotg_pullup - connect/disconnect the USB PHY
4672 dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on,
4673 hsotg->op_state);
4675 /* Don't modify pullup state while in host mode */
4676 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) {
4677 hsotg->enabled = is_on;
4681 spin_lock_irqsave(&hsotg->lock, flags);
4683 hsotg->enabled = 1;
4685 /* Enable ACG feature in device mode, if supported */
4691 hsotg->enabled = 0;
4694 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
4695 spin_unlock_irqrestore(&hsotg->lock, flags);
4705 dev_dbg(hsotg->dev, "%s: is_active: %d\n", __func__, is_active);
4706 spin_lock_irqsave(&hsotg->lock, flags);
4710 * that state before being initialized / de-initialized
4712 if (hsotg->lx_state == DWC2_L2 && hsotg->in_ppd)
4720 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
4723 if (hsotg->enabled) {
4724 /* Enable ACG feature in device mode, if supported */
4733 spin_unlock_irqrestore(&hsotg->lock, flags);
4738 * dwc2_hsotg_vbus_draw - report bMaxPower field
4748 if (IS_ERR_OR_NULL(hsotg->uphy))
4749 return -ENOTSUPP;
4750 return usb_phy_set_power(hsotg->uphy, mA);
4758 spin_lock_irqsave(&hsotg->lock, flags);
4761 hsotg->params.speed = DWC2_SPEED_PARAM_HIGH;
4764 hsotg->params.speed = DWC2_SPEED_PARAM_FULL;
4767 hsotg->params.speed = DWC2_SPEED_PARAM_LOW;
4770 dev_err(hsotg->dev, "invalid speed (%d)\n", speed);
4772 spin_unlock_irqrestore(&hsotg->lock, flags);
4787 * dwc2_hsotg_initep - initialise a single endpoint
4811 hs_ep->dir_in = dir_in;
4812 hs_ep->index = epnum;
4814 snprintf(hs_ep->name, sizeof(hs_ep->name), "ep%d%s", epnum, dir);
4816 INIT_LIST_HEAD(&hs_ep->queue);
4817 INIT_LIST_HEAD(&hs_ep->ep.ep_list);
4821 list_add_tail(&hs_ep->ep.ep_list, &hsotg->gadget.ep_list);
4823 hs_ep->parent = hsotg;
4824 hs_ep->ep.name = hs_ep->name;
4826 if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
4827 usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
4829 usb_ep_set_maxpacket_limit(&hs_ep->ep,
4831 hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
4834 hs_ep->ep.caps.type_control = true;
4836 if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
4837 hs_ep->ep.caps.type_iso = true;
4838 hs_ep->ep.caps.type_bulk = true;
4840 hs_ep->ep.caps.type_int = true;
4844 hs_ep->ep.caps.dir_in = true;
4846 hs_ep->ep.caps.dir_out = true;
4849 * if we're using dma, we need to set the next-endpoint pointer
4864 * dwc2_hsotg_hw_cfg - read HW configuration registers
4877 hsotg->num_of_eps = hsotg->hw_params.num_dev_ep;
4880 hsotg->num_of_eps++;
4882 hsotg->eps_in[0] = devm_kzalloc(hsotg->dev,
4885 if (!hsotg->eps_in[0])
4886 return -ENOMEM;
4888 hsotg->eps_out[0] = hsotg->eps_in[0];
4890 cfg = hsotg->hw_params.dev_ep_dirs;
4891 for (i = 1, cfg >>= 2; i < hsotg->num_of_eps; i++, cfg >>= 2) {
4895 hsotg->eps_in[i] = devm_kzalloc(hsotg->dev,
4897 if (!hsotg->eps_in[i])
4898 return -ENOMEM;
4902 hsotg->eps_out[i] = devm_kzalloc(hsotg->dev,
4904 if (!hsotg->eps_out[i])
4905 return -ENOMEM;
4909 hsotg->fifo_mem = hsotg->hw_params.total_fifo_size;
4910 hsotg->dedicated_fifos = hsotg->hw_params.en_multiple_tx_fifo;
4912 dev_info(hsotg->dev, "EPs: %d, %s fifos, %d entries in SPRAM\n",
4913 hsotg->num_of_eps,
4914 hsotg->dedicated_fifos ? "dedicated" : "shared",
4915 hsotg->fifo_mem);
4920 * dwc2_hsotg_dump - dump state of the udc
4927 struct device *dev = hsotg->dev;
4943 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
4950 for (idx = 0; idx < hsotg->num_of_eps; idx++) {
4952 "ep%d-in: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n", idx,
4959 "ep%d-out: EPCTL=0x%08x, SIZ=0x%08x, DMA=0x%08x\n",
4971 * dwc2_gadget_init - init function for gadget
4977 struct device *dev = hsotg->dev;
4983 hsotg->params.g_np_tx_fifo_size);
4984 dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
4986 switch (hsotg->params.speed) {
4988 hsotg->gadget.max_speed = USB_SPEED_LOW;
4991 hsotg->gadget.max_speed = USB_SPEED_FULL;
4994 hsotg->gadget.max_speed = USB_SPEED_HIGH;
4998 hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
4999 hsotg->gadget.name = dev_name(dev);
5000 hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
5001 hsotg->remote_wakeup_allowed = 0;
5003 if (hsotg->params.lpm)
5004 hsotg->gadget.lpm_capable = true;
5006 if (hsotg->dr_mode == USB_DR_MODE_OTG)
5007 hsotg->gadget.is_otg = 1;
5008 else if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
5009 hsotg->op_state = OTG_STATE_B_PERIPHERAL;
5013 dev_err(hsotg->dev, "Hardware configuration failed: %d\n", ret);
5017 hsotg->ctrl_buff = devm_kzalloc(hsotg->dev,
5019 if (!hsotg->ctrl_buff)
5020 return -ENOMEM;
5022 hsotg->ep0_buff = devm_kzalloc(hsotg->dev,
5024 if (!hsotg->ep0_buff)
5025 return -ENOMEM;
5033 ret = devm_request_irq(hsotg->dev, hsotg->irq, dwc2_hsotg_irq,
5034 IRQF_SHARED, dev_name(hsotg->dev), hsotg);
5040 /* hsotg->num_of_eps holds number of EPs other than ep0 */
5042 if (hsotg->num_of_eps == 0) {
5044 return -EINVAL;
5049 INIT_LIST_HEAD(&hsotg->gadget.ep_list);
5050 hsotg->gadget.ep0 = &hsotg->eps_out[0]->ep;
5054 hsotg->ctrl_req = dwc2_hsotg_ep_alloc_request(&hsotg->eps_out[0]->ep,
5056 if (!hsotg->ctrl_req) {
5058 return -ENOMEM;
5062 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) {
5063 if (hsotg->eps_in[epnum])
5064 dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum],
5066 if (hsotg->eps_out[epnum])
5067 dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum],
5077 * dwc2_hsotg_remove - remove function for hsotg driver
5083 usb_del_gadget_udc(&hsotg->gadget);
5084 dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
5093 if (hsotg->lx_state != DWC2_L0)
5096 if (hsotg->driver) {
5099 dev_info(hsotg->dev, "suspending usb gadget %s\n",
5100 hsotg->driver->driver.name);
5102 spin_lock_irqsave(&hsotg->lock, flags);
5103 if (hsotg->enabled)
5106 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
5107 spin_unlock_irqrestore(&hsotg->lock, flags);
5109 for (ep = 1; ep < hsotg->num_of_eps; ep++) {
5110 if (hsotg->eps_in[ep])
5111 dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
5112 if (hsotg->eps_out[ep])
5113 dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
5124 if (hsotg->lx_state == DWC2_L2)
5127 if (hsotg->driver) {
5128 dev_info(hsotg->dev, "resuming usb gadget %s\n",
5129 hsotg->driver->driver.name);
5131 spin_lock_irqsave(&hsotg->lock, flags);
5133 if (hsotg->enabled) {
5134 /* Enable ACG feature in device mode, if supported */
5138 spin_unlock_irqrestore(&hsotg->lock, flags);
5145 * dwc2_backup_device_registers() - Backup controller device registers.
5156 dev_dbg(hsotg->dev, "%s\n", __func__);
5159 dr = &hsotg->dr_backup;
5161 dr->dcfg = dwc2_readl(hsotg, DCFG);
5162 dr->dctl = dwc2_readl(hsotg, DCTL);
5163 dr->daintmsk = dwc2_readl(hsotg, DAINTMSK);
5164 dr->diepmsk = dwc2_readl(hsotg, DIEPMSK);
5165 dr->doepmsk = dwc2_readl(hsotg, DOEPMSK);
5167 for (i = 0; i < hsotg->num_of_eps; i++) {
5169 dr->diepctl[i] = dwc2_readl(hsotg, DIEPCTL(i));
5172 if (dr->diepctl[i] & DXEPCTL_DPID)
5173 dr->diepctl[i] |= DXEPCTL_SETD1PID;
5175 dr->diepctl[i] |= DXEPCTL_SETD0PID;
5177 dr->dieptsiz[i] = dwc2_readl(hsotg, DIEPTSIZ(i));
5178 dr->diepdma[i] = dwc2_readl(hsotg, DIEPDMA(i));
5181 dr->doepctl[i] = dwc2_readl(hsotg, DOEPCTL(i));
5184 if (dr->doepctl[i] & DXEPCTL_DPID)
5185 dr->doepctl[i] |= DXEPCTL_SETD1PID;
5187 dr->doepctl[i] |= DXEPCTL_SETD0PID;
5189 dr->doeptsiz[i] = dwc2_readl(hsotg, DOEPTSIZ(i));
5190 dr->doepdma[i] = dwc2_readl(hsotg, DOEPDMA(i));
5191 dr->dtxfsiz[i] = dwc2_readl(hsotg, DPTXFSIZN(i));
5193 dr->valid = true;
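
A sketch of the data-PID translation done while backing up the endpoint control registers in dwc2_backup_device_registers() above: the read-only DPID status bit is converted into the matching write-only SetD0PID/SetD1PID bit so the restore path re-establishes the correct PID. The bit positions are placeholders.

#include <stdint.h>

#define DEMO_DPID	(1u << 16)	/* current data PID, read-only (placeholder) */
#define DEMO_SETD0PID	(1u << 28)	/* write-only: force DATA0 (placeholder) */
#define DEMO_SETD1PID	(1u << 29)	/* write-only: force DATA1 (placeholder) */

static uint32_t backup_epctl(uint32_t epctl)
{
	if (epctl & DEMO_DPID)
		epctl |= DEMO_SETD1PID;	/* endpoint was on DATA1 */
	else
		epctl |= DEMO_SETD0PID;	/* endpoint was on DATA0 */
	return epctl;
}
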
5198 * dwc2_restore_device_registers() - Restore controller device registers.
5212 dev_dbg(hsotg->dev, "%s\n", __func__);
5215 dr = &hsotg->dr_backup;
5216 if (!dr->valid) {
5217 dev_err(hsotg->dev, "%s: no device registers to restore\n",
5219 return -EINVAL;
5221 dr->valid = false;
5224 dwc2_writel(hsotg, dr->dctl, DCTL);
5226 dwc2_writel(hsotg, dr->daintmsk, DAINTMSK);
5227 dwc2_writel(hsotg, dr->diepmsk, DIEPMSK);
5228 dwc2_writel(hsotg, dr->doepmsk, DOEPMSK);
5230 for (i = 0; i < hsotg->num_of_eps; i++) {
5232 dwc2_writel(hsotg, dr->dieptsiz[i], DIEPTSIZ(i));
5233 dwc2_writel(hsotg, dr->diepdma[i], DIEPDMA(i));
5234 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5235 /** WA for enabled EPx's IN in DDMA mode. On entering
5236 * hibernation a wrong value is read and saved from DIEPDMAx;
5237 * as a result a BNA interrupt is asserted on hibernation exit
5241 (dr->diepctl[i] & DXEPCTL_EPENA))
5242 dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
5243 dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
5244 dwc2_writel(hsotg, dr->diepctl[i], DIEPCTL(i));
5246 dwc2_writel(hsotg, dr->doeptsiz[i], DOEPTSIZ(i));
5247 /* WA for enabled EPx's OUT in DDMA mode. On entering
5248 * hibernation a wrong value is read and saved from DOEPDMAx;
5249 * as a result a BNA interrupt is asserted on hibernation exit
5253 (dr->doepctl[i] & DXEPCTL_EPENA))
5254 dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
5255 dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
5256 dwc2_writel(hsotg, dr->doepctl[i], DOEPCTL(i));
5263 * dwc2_gadget_init_lpm - Configure the core to support LPM in device mode
5272 if (!hsotg->params.lpm)
5276 val |= hsotg->params.hird_threshold_en ? GLPMCFG_HIRD_THRES_EN : 0;
5277 val |= hsotg->params.lpm_clock_gating ? GLPMCFG_ENBLSLPM : 0;
5278 val |= hsotg->params.hird_threshold << GLPMCFG_HIRD_THRES_SHIFT;
5279 val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
5283 dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
5286 if (hsotg->params.service_interval)
5291 * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
5301 val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
5302 val |= hsotg->params.sof_cnt_wkup_alert <<
5306 dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
5310 * dwc2_gadget_enter_hibernation() - Put controller in Hibernation.
5314 * Return non-zero if failed to enter to hibernation.
5322 hsotg->lx_state = DWC2_L2;
5323 dev_dbg(hsotg->dev, "Start of hibernation completed\n");
5326 dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5332 dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5342 /* Set flag to indicate that we are in hibernation */
5343 hsotg->hibernated = 1;
5351 /* Unmask device mode interrupts in GPWRDN */
5372 hsotg->gr_backup.gpwrdn = dwc2_readl(hsotg, GPWRDN);
5373 dev_dbg(hsotg->dev, "Hibernation completed\n");
5380 * This function is for exiting from device mode hibernation by host-initiated
5381 * resume/reset and device-initiated remote wakeup.
5387 * Return non-zero if failed to exit from hibernation.
5399 gr = &hsotg->gr_backup;
5400 dr = &hsotg->dr_backup;
5402 if (!hsotg->hibernated) {
5403 dev_dbg(hsotg->dev, "Already exited from Hibernation\n");
5406 dev_dbg(hsotg->dev,
5417 /* De-assert Restore */
5430 dwc2_writel(hsotg, gr->gusbcfg, GUSBCFG);
5431 dwc2_writel(hsotg, dr->dcfg, DCFG);
5432 dwc2_writel(hsotg, dr->dctl, DCTL);
5438 /* De-assert Wakeup Logic */
5446 dwc2_writel(hsotg, dr->dctl | DCTL_RMTWKUPSIG, DCTL);
5462 dev_err(hsotg->dev, "%s: failed to restore registers\n",
5470 dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5482 hsotg->hibernated = 0;
5483 hsotg->lx_state = DWC2_L0;
5484 dev_dbg(hsotg->dev, "Hibernation recovery completes here\n");
5490 * dwc2_gadget_enter_partial_power_down() - Put controller in partial
5495 * Return: non-zero if failed to enter device partial power down.
5497 * This function is for entering device mode partial power down.
5504 dev_dbg(hsotg->dev, "Entering device partial power down started.\n");
5509 dev_err(hsotg->dev, "%s: failed to backup global registers\n",
5516 dev_err(hsotg->dev, "%s: failed to backup device registers\n",
5542 hsotg->in_ppd = 1;
5543 hsotg->lx_state = DWC2_L2;
5545 dev_dbg(hsotg->dev, "Entering device partial power down completed.\n");
5551 * dwc2_gadget_exit_partial_power_down() - Exit controller from device partial
5557 * Return: non-zero if failed to exit device partial power down.
5559 * This function is for exiting from device mode partial power down.
5569 dr = &hsotg->dr_backup;
5571 dev_dbg(hsotg->dev, "Exiting device partial Power Down started.\n");
5589 dev_err(hsotg->dev, "%s: failed to restore registers\n",
5594 dwc2_writel(hsotg, dr->dcfg, DCFG);
5598 dev_err(hsotg->dev, "%s: failed to restore device registers\n",
5604 /* Set the Power-On Programming done bit */
5610 hsotg->in_ppd = 0;
5611 hsotg->lx_state = DWC2_L0;
5613 dev_dbg(hsotg->dev, "Exiting device partial Power Down completed.\n");
5618 * dwc2_gadget_enter_clock_gating() - Put controller in clock gating.
5622 * Return: non-zero if failed to enter device clock gating.
5624 * This function is for entering device mode clock gating.
5630 dev_dbg(hsotg->dev, "Entering device clock gating.\n");
5644 hsotg->lx_state = DWC2_L2;
5645 hsotg->bus_suspended = true;
5649 * dwc2_gadget_exit_clock_gating() - Exit controller from device clock gating.
5654 * This function is for exiting from device mode clock gating.
5661 dev_dbg(hsotg->dev, "Exiting device clock gating.\n");
5684 hsotg->lx_state = DWC2_L0;
5685 hsotg->bus_suspended = false;