Lines matching "ring", "disable", "pullup"
1 // SPDX-License-Identifier: GPL-2.0+
3 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
16 #include <linux/dma-mapping.h>
56 EP_INFO("ep1in-bulk",
58 EP_INFO("ep2out-bulk",
60 EP_INFO("ep3in-int",
62 EP_INFO("ep4out-int",
75 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
78 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
83 * true - one IRQ per transfer, for transfers <= 2048B. Generates
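A minimal sketch of how a boolean module parameter such as irq_coalesce is normally declared and documented; the parameter name comes from the comment above, while the permission bits and description string here are assumptions:

    static bool irq_coalesce;
    module_param(irq_coalesce, bool, 0444);
    MODULE_PARM_DESC(irq_coalesce,
                     "one IRQ per RX transfer instead of one per DATAx packet");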
146 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
148 * @n_bds: Number of buffer descriptors in the ring.
176 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
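A plausible layout for struct iudma_ch_cfg, inferred from the kerneldoc fields above and the [0] initializer shown; field order and types are assumptions rather than a copy of the source:

    struct iudma_ch_cfg {
            int ep_num;         /* USB endpoint number; -1 for the ep0 RX channel */
            int n_bds;          /* buffer descriptors in the ring */
            int ep_type;        /* BCMEP_CTRL, BCMEP_BULK, BCMEP_INTR, ... */
            int dir;            /* BCMEP_OUT or BCMEP_IN */
            int n_fifo_slots;   /* FIFO slots reserved for this channel */
            int max_pkt_hs;     /* wMaxPacketSize at high speed */
            int max_pkt_fs;     /* wMaxPacketSize at full speed */
    };
    /* consistent with [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 } above */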
187 * struct iudma_ch - Represents the current state of a single IUDMA channel.
188 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
189 * @ep_num: USB endpoint number. -1 for ep0 RX.
197 * @end_bd: Points to the final BD in the ring.
199 * @bd_ring: Base pointer to the BD ring.
201 * @n_bds: Total number of BDs in the ring.
230 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
248 * struct bcm63xx_req - Internal (driver) state of a single request.
264 * struct bcm63xx_udc - Driver/hardware private context.
279 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
354 return bcm_readl(udc->usbd_regs + off); in usbd_readl()
359 bcm_writel(val, udc->usbd_regs + off); in usbd_writel()
364 return bcm_readl(udc->iudma_regs + off); in usb_dma_readl()
369 bcm_writel(val, udc->iudma_regs + off); in usb_dma_writel()
374 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off + in usb_dmac_readl()
381 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off + in usb_dmac_writel()
387 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off + in usb_dmas_readl()
394 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off + in usb_dmas_writel()
401 clk_enable(udc->usbh_clk); in set_clocks()
402 clk_enable(udc->usbd_clk); in set_clocks()
405 clk_disable(udc->usbd_clk); in set_clocks()
406 clk_disable(udc->usbh_clk); in set_clocks()
411 * Low-level IUDMA / FIFO operations
415 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
433 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
436 * @is_stalled: true to enable stall, false to disable.
448 (bep->ep_num << USBD_STALL_EPNUM_SHIFT); in bcm63xx_set_stall()
453 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
457 * per-IUDMA-channel-pair.
461 int is_hs = udc->gadget.speed == USB_SPEED_HIGH; in bcm63xx_fifo_setup()
473 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) << in bcm63xx_fifo_setup()
475 rx_fifo_slot += rx_cfg->n_fifo_slots; in bcm63xx_fifo_setup()
478 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs, in bcm63xx_fifo_setup()
482 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) << in bcm63xx_fifo_setup()
484 tx_fifo_slot += tx_cfg->n_fifo_slots; in bcm63xx_fifo_setup()
487 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs, in bcm63xx_fifo_setup()
495 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
512 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
524 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
534 if (cfg->ep_num < 0) in bcm63xx_ep_init()
537 bcm63xx_ep_dma_select(udc, cfg->ep_num); in bcm63xx_ep_init()
538 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) | in bcm63xx_ep_init()
545 * bcm63xx_ep_setup - Configure per-endpoint settings.
558 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ? in bcm63xx_ep_setup()
559 cfg->max_pkt_hs : cfg->max_pkt_fs; in bcm63xx_ep_setup()
560 int idx = cfg->ep_num; in bcm63xx_ep_setup()
562 udc->iudma[i].max_pkt = max_pkt; in bcm63xx_ep_setup()
566 usb_ep_set_maxpacket_limit(&udc->bep[idx].ep, max_pkt); in bcm63xx_ep_setup()
569 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) | in bcm63xx_ep_setup()
570 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) | in bcm63xx_ep_setup()
571 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) | in bcm63xx_ep_setup()
572 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) | in bcm63xx_ep_setup()
573 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) | in bcm63xx_ep_setup()
580 * iudma_write - Queue a single IUDMA transaction.
596 unsigned int bytes_left = breq->req.length - breq->offset; in iudma_write()
597 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ? in iudma_write()
598 iudma->max_pkt : IUDMA_MAX_FRAGMENT; in iudma_write()
600 iudma->n_bds_used = 0; in iudma_write()
601 breq->bd_bytes = 0; in iudma_write()
602 breq->iudma = iudma; in iudma_write()
604 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero) in iudma_write()
608 struct bcm_enet_desc *d = iudma->write_bd; in iudma_write()
612 if (d == iudma->end_bd) { in iudma_write()
614 iudma->write_bd = iudma->bd_ring; in iudma_write()
616 iudma->write_bd++; in iudma_write()
618 iudma->n_bds_used++; in iudma_write()
640 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds || in iudma_write()
646 d->address = breq->req.dma + breq->offset; in iudma_write()
648 d->len_stat = dmaflags; in iudma_write()
650 breq->offset += n_bytes; in iudma_write()
651 breq->bd_bytes += n_bytes; in iudma_write()
652 bytes_left -= n_bytes; in iudma_write()
656 ENETDMAC_CHANCFG_REG, iudma->ch_idx); in iudma_write()
660 * iudma_read - Check for IUDMA buffer completion.
666 * otherwise it returns -EBUSY.
671 struct bcm_enet_desc *d = iudma->read_bd; in iudma_read()
673 if (!iudma->n_bds_used) in iudma_read()
674 return -EINVAL; in iudma_read()
676 for (i = 0; i < iudma->n_bds_used; i++) { in iudma_read()
679 dmaflags = d->len_stat; in iudma_read()
682 return -EBUSY; in iudma_read()
686 if (d == iudma->end_bd) in iudma_read()
687 d = iudma->bd_ring; in iudma_read()
692 iudma->read_bd = d; in iudma_read()
693 iudma->n_bds_used = 0; in iudma_read()
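A sketch of the completion test the loop above performs on each buffer descriptor: a BD whose OWNER bit is still set has not been written back by the IUDMA engine yet. The DMADESC_* flag names and the bcm_enet_desc layout are assumed from the shared bcm63xx IUDMA definitions:

    /* Returns bytes completed in this BD, or -EBUSY if the hardware still owns it. */
    static int bd_completed_len(const struct bcm_enet_desc *d)
    {
            u32 dmaflags = d->len_stat;

            if (dmaflags & DMADESC_OWNER_MASK)
                    return -EBUSY;
            return (dmaflags & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
    }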
698 * iudma_reset_channel - Stop DMA on a single channel.
706 int ch_idx = iudma->ch_idx; in iudma_reset_channel()
708 if (!iudma->is_tx) in iudma_reset_channel()
709 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num)); in iudma_reset_channel()
719 if (iudma->is_tx && iudma->ep_num >= 0) in iudma_reset_channel()
720 bcm63xx_fifo_reset_ep(udc, iudma->ep_num); in iudma_reset_channel()
722 if (!timeout--) { in iudma_reset_channel()
723 dev_err(udc->dev, "can't reset IUDMA channel %d\n", in iudma_reset_channel()
728 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n", in iudma_reset_channel()
736 /* don't leave "live" HW-owned entries for the next guy to step on */ in iudma_reset_channel()
737 for (d = iudma->bd_ring; d <= iudma->end_bd; d++) in iudma_reset_channel()
738 d->len_stat = 0; in iudma_reset_channel()
741 iudma->read_bd = iudma->write_bd = iudma->bd_ring; in iudma_reset_channel()
742 iudma->n_bds_used = 0; in iudma_reset_channel()
749 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx); in iudma_reset_channel()
754 * iudma_init_channel - One-time IUDMA channel initialization.
760 struct iudma_ch *iudma = &udc->iudma[ch_idx]; in iudma_init_channel()
762 unsigned int n_bds = cfg->n_bds; in iudma_init_channel()
765 iudma->ep_num = cfg->ep_num; in iudma_init_channel()
766 iudma->ch_idx = ch_idx; in iudma_init_channel()
767 iudma->is_tx = !!(ch_idx & 0x01); in iudma_init_channel()
768 if (iudma->ep_num >= 0) { in iudma_init_channel()
769 bep = &udc->bep[iudma->ep_num]; in iudma_init_channel()
770 bep->iudma = iudma; in iudma_init_channel()
771 INIT_LIST_HEAD(&bep->queue); in iudma_init_channel()
774 iudma->bep = bep; in iudma_init_channel()
775 iudma->udc = udc; in iudma_init_channel()
778 if (iudma->ep_num <= 0) in iudma_init_channel()
779 iudma->enabled = true; in iudma_init_channel()
781 iudma->n_bds = n_bds; in iudma_init_channel()
782 iudma->bd_ring = dmam_alloc_coherent(udc->dev, in iudma_init_channel()
784 &iudma->bd_ring_dma, GFP_KERNEL); in iudma_init_channel()
785 if (!iudma->bd_ring) in iudma_init_channel()
786 return -ENOMEM; in iudma_init_channel()
787 iudma->end_bd = &iudma->bd_ring[n_bds - 1]; in iudma_init_channel()
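The ring allocation above follows the usual managed coherent-DMA pattern; a self-contained sketch (the size expression and the hypothetical alloc_bd_ring() wrapper are assumptions, since the matched lines elide them):

    static int alloc_bd_ring(struct device *dev, unsigned int n_bds,
                             struct bcm_enet_desc **ring, dma_addr_t *ring_dma)
    {
            *ring = dmam_alloc_coherent(dev, n_bds * sizeof(**ring),
                                        ring_dma, GFP_KERNEL);
            if (!*ring)
                    return -ENOMEM;
            /* devm-managed: freed automatically when the device is unbound */
            return 0;
    }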
793 * iudma_init - One-time initialization of all IUDMA channels.
808 iudma_reset_channel(udc, &udc->iudma[i]); in iudma_init()
811 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG); in iudma_init()
816 * iudma_uninit - Uninitialize IUDMA channels.
828 iudma_reset_channel(udc, &udc->iudma[i]); in iudma_uninit()
834 * Other low-level USBD operations
838 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
840 * @enable_irqs: true to enable, false to disable.
858 * bcm63xx_select_phy_mode - Select between USB device and host mode.
870 u32 val, portmask = BIT(udc->pd->port_no); in bcm63xx_select_phy_mode()
900 * bcm63xx_select_pullup - Enable/disable the pullup on D+
902 * @is_on: true to enable the pullup, false to disable.
904 * If the pullup is active, the host will sense a FS/HS device connected to
905 * the port. If the pullup is inactive, the host will think the USB
910 u32 val, portmask = BIT(udc->pd->port_no); in bcm63xx_select_pullup()
921 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
933 clk_put(udc->usbd_clk); in bcm63xx_uninit_udc_hw()
934 clk_put(udc->usbh_clk); in bcm63xx_uninit_udc_hw()
938 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
946 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT, in bcm63xx_init_udc_hw()
948 if (!udc->ep0_ctrl_buf) in bcm63xx_init_udc_hw()
949 return -ENOMEM; in bcm63xx_init_udc_hw()
951 INIT_LIST_HEAD(&udc->gadget.ep_list); in bcm63xx_init_udc_hw()
953 struct bcm63xx_ep *bep = &udc->bep[i]; in bcm63xx_init_udc_hw()
955 bep->ep.name = bcm63xx_ep_info[i].name; in bcm63xx_init_udc_hw()
956 bep->ep.caps = bcm63xx_ep_info[i].caps; in bcm63xx_init_udc_hw()
957 bep->ep_num = i; in bcm63xx_init_udc_hw()
958 bep->ep.ops = &bcm63xx_udc_ep_ops; in bcm63xx_init_udc_hw()
959 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list); in bcm63xx_init_udc_hw()
960 bep->halted = 0; in bcm63xx_init_udc_hw()
961 usb_ep_set_maxpacket_limit(&bep->ep, BCM63XX_MAX_CTRL_PKT); in bcm63xx_init_udc_hw()
962 bep->udc = udc; in bcm63xx_init_udc_hw()
963 bep->ep.desc = NULL; in bcm63xx_init_udc_hw()
964 INIT_LIST_HEAD(&bep->queue); in bcm63xx_init_udc_hw()
967 udc->gadget.ep0 = &udc->bep[0].ep; in bcm63xx_init_udc_hw()
968 list_del(&udc->bep[0].ep.ep_list); in bcm63xx_init_udc_hw()
970 udc->gadget.speed = USB_SPEED_UNKNOWN; in bcm63xx_init_udc_hw()
971 udc->ep0state = EP0_SHUTDOWN; in bcm63xx_init_udc_hw()
973 udc->usbh_clk = clk_get(udc->dev, "usbh"); in bcm63xx_init_udc_hw()
974 if (IS_ERR(udc->usbh_clk)) in bcm63xx_init_udc_hw()
975 return -EIO; in bcm63xx_init_udc_hw()
977 udc->usbd_clk = clk_get(udc->dev, "usbd"); in bcm63xx_init_udc_hw()
978 if (IS_ERR(udc->usbd_clk)) { in bcm63xx_init_udc_hw()
979 clk_put(udc->usbh_clk); in bcm63xx_init_udc_hw()
980 return -EIO; in bcm63xx_init_udc_hw()
996 if (udc->gadget.max_speed == USB_SPEED_HIGH) in bcm63xx_init_udc_hw()
1023 * bcm63xx_ep_enable - Enable one endpoint.
1034 struct bcm63xx_udc *udc = bep->udc; in bcm63xx_ep_enable()
1035 struct iudma_ch *iudma = bep->iudma; in bcm63xx_ep_enable()
1038 if (!ep || !desc || ep->name == bcm63xx_ep0name) in bcm63xx_ep_enable()
1039 return -EINVAL; in bcm63xx_ep_enable()
1041 if (!udc->driver) in bcm63xx_ep_enable()
1042 return -ESHUTDOWN; in bcm63xx_ep_enable()
1044 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_ep_enable()
1045 if (iudma->enabled) { in bcm63xx_ep_enable()
1046 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_ep_enable()
1047 return -EINVAL; in bcm63xx_ep_enable()
1050 iudma->enabled = true; in bcm63xx_ep_enable()
1051 BUG_ON(!list_empty(&bep->queue)); in bcm63xx_ep_enable()
1055 bep->halted = 0; in bcm63xx_ep_enable()
1057 clear_bit(bep->ep_num, &udc->wedgemap); in bcm63xx_ep_enable()
1059 ep->desc = desc; in bcm63xx_ep_enable()
1060 ep->maxpacket = usb_endpoint_maxp(desc); in bcm63xx_ep_enable()
1062 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_ep_enable()
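For context, a sketch of how a gadget function driver typically reaches this op through the gadget core; enable_my_ep() and its arguments are illustrative, not part of this file:

    /* e.g. called from a usb_function's set_alt() handler */
    static int enable_my_ep(struct usb_gadget *gadget, struct usb_function *f,
                            struct usb_ep *my_ep)
    {
            int rc;

            rc = config_ep_by_speed(gadget, f, my_ep);  /* pick the FS or HS descriptor */
            if (rc)
                    return rc;
            return usb_ep_enable(my_ep);                /* dispatches to the UDC's .enable op */
    }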
1067 * bcm63xx_ep_disable - Disable one endpoint.
1068 * @ep: Endpoint to disable.
1073 struct bcm63xx_udc *udc = bep->udc; in bcm63xx_ep_disable()
1074 struct iudma_ch *iudma = bep->iudma; in bcm63xx_ep_disable()
1078 if (!ep || !ep->desc) in bcm63xx_ep_disable()
1079 return -EINVAL; in bcm63xx_ep_disable()
1081 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_ep_disable()
1082 if (!iudma->enabled) { in bcm63xx_ep_disable()
1083 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_ep_disable()
1084 return -EINVAL; in bcm63xx_ep_disable()
1086 iudma->enabled = false; in bcm63xx_ep_disable()
1090 if (!list_empty(&bep->queue)) { in bcm63xx_ep_disable()
1091 list_for_each_entry_safe(breq, n, &bep->queue, queue) { in bcm63xx_ep_disable()
1092 usb_gadget_unmap_request(&udc->gadget, &breq->req, in bcm63xx_ep_disable()
1093 iudma->is_tx); in bcm63xx_ep_disable()
1094 list_del(&breq->queue); in bcm63xx_ep_disable()
1095 breq->req.status = -ESHUTDOWN; in bcm63xx_ep_disable()
1097 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_ep_disable()
1098 usb_gadget_giveback_request(&iudma->bep->ep, &breq->req); in bcm63xx_ep_disable()
1099 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_ep_disable()
1102 ep->desc = NULL; in bcm63xx_ep_disable()
1104 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_ep_disable()
1109 * bcm63xx_udc_alloc_request - Allocate a new request.
1121 return &breq->req; in bcm63xx_udc_alloc_request()
1125 * bcm63xx_udc_free_request - Free a request.
1137 * bcm63xx_udc_queue - Queue up a new request.
1154 struct bcm63xx_udc *udc = bep->udc; in bcm63xx_udc_queue()
1159 if (unlikely(!req || !req->complete || !req->buf || !ep)) in bcm63xx_udc_queue()
1160 return -EINVAL; in bcm63xx_udc_queue()
1162 req->actual = 0; in bcm63xx_udc_queue()
1163 req->status = 0; in bcm63xx_udc_queue()
1164 breq->offset = 0; in bcm63xx_udc_queue()
1166 if (bep == &udc->bep[0]) { in bcm63xx_udc_queue()
1168 if (udc->ep0_reply) in bcm63xx_udc_queue()
1169 return -EINVAL; in bcm63xx_udc_queue()
1171 udc->ep0_reply = req; in bcm63xx_udc_queue()
1172 schedule_work(&udc->ep0_wq); in bcm63xx_udc_queue()
1176 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_queue()
1177 if (!bep->iudma->enabled) { in bcm63xx_udc_queue()
1178 rc = -ESHUTDOWN; in bcm63xx_udc_queue()
1182 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx); in bcm63xx_udc_queue()
1184 list_add_tail(&breq->queue, &bep->queue); in bcm63xx_udc_queue()
1185 if (list_is_singular(&bep->queue)) in bcm63xx_udc_queue()
1186 iudma_write(udc, bep->iudma, breq); in bcm63xx_udc_queue()
1190 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_queue()
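A sketch of the gadget-side call sequence that lands in this queue op; queue_one() and its parameters are illustrative:

    static int queue_one(struct usb_ep *ep, void *buf, unsigned int len,
                         void (*done)(struct usb_ep *, struct usb_request *))
    {
            struct usb_request *req;

            req = usb_ep_alloc_request(ep, GFP_ATOMIC);
            if (!req)
                    return -ENOMEM;

            req->buf = buf;
            req->length = len;
            req->zero = 1;          /* trailing ZLP if len is a multiple of maxpacket */
            req->complete = done;   /* called when the transfer finishes or is aborted */

            return usb_ep_queue(ep, req, GFP_ATOMIC);
    }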
1195 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1199 * If the request is not at the head of the queue, this is easy - just nuke
1206 struct bcm63xx_udc *udc = bep->udc; in bcm63xx_udc_dequeue()
1211 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_dequeue()
1212 if (list_empty(&bep->queue)) { in bcm63xx_udc_dequeue()
1213 rc = -EINVAL; in bcm63xx_udc_dequeue()
1217 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue); in bcm63xx_udc_dequeue()
1218 usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx); in bcm63xx_udc_dequeue()
1221 iudma_reset_channel(udc, bep->iudma); in bcm63xx_udc_dequeue()
1222 list_del(&breq->queue); in bcm63xx_udc_dequeue()
1224 if (!list_empty(&bep->queue)) { in bcm63xx_udc_dequeue()
1227 next = list_first_entry(&bep->queue, in bcm63xx_udc_dequeue()
1229 iudma_write(udc, bep->iudma, next); in bcm63xx_udc_dequeue()
1232 list_del(&breq->queue); in bcm63xx_udc_dequeue()
1236 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_dequeue()
1238 req->status = -ESHUTDOWN; in bcm63xx_udc_dequeue()
1239 req->complete(ep, req); in bcm63xx_udc_dequeue()
1245 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1254 struct bcm63xx_udc *udc = bep->udc; in bcm63xx_udc_set_halt()
1257 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_set_halt()
1259 bep->halted = value; in bcm63xx_udc_set_halt()
1260 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_set_halt()
1266 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1274 struct bcm63xx_udc *udc = bep->udc; in bcm63xx_udc_set_wedge()
1277 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_set_wedge()
1278 set_bit(bep->ep_num, &udc->wedgemap); in bcm63xx_udc_set_wedge()
1280 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_set_wedge()
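Seen from the gadget side, the difference between the two ops is roughly this (a sketch using the standard <linux/usb/gadget.h> helpers):

    usb_ep_set_halt(ep);   /* STALL until the host sends CLEAR_FEATURE(ENDPOINT_HALT) */
    usb_ep_set_wedge(ep);  /* STALL that ignores CLEAR_FEATURE; cleared only on reset/reconfig */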
1287 .disable = bcm63xx_ep_disable,
1304 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1306 * @ctrl: 8-byte SETUP request.
1313 spin_unlock_irq(&udc->lock); in bcm63xx_ep0_setup_callback()
1314 rc = udc->driver->setup(&udc->gadget, ctrl); in bcm63xx_ep0_setup_callback()
1315 spin_lock_irq(&udc->lock); in bcm63xx_ep0_setup_callback()
1320 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1338 ctrl.wValue = cpu_to_le16(udc->cfg); in bcm63xx_ep0_spoof_set_cfg()
1344 dev_warn_ratelimited(udc->dev, in bcm63xx_ep0_spoof_set_cfg()
1345 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n", in bcm63xx_ep0_spoof_set_cfg()
1346 udc->cfg); in bcm63xx_ep0_spoof_set_cfg()
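Written out in full, the spoofed packet amounts to the usb_ctrlrequest below; this is a sketch, since the matched lines only show the wValue assignment:

    struct usb_ctrlrequest ctrl = {
            .bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
            .bRequest     = USB_REQ_SET_CONFIGURATION,
            .wValue       = cpu_to_le16(udc->cfg),
            .wIndex       = 0,
            .wLength      = 0,
    };
    /* then handed to the gadget driver via bcm63xx_ep0_setup_callback() */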
1352 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1362 ctrl.wValue = cpu_to_le16(udc->alt_iface); in bcm63xx_ep0_spoof_set_iface()
1363 ctrl.wIndex = cpu_to_le16(udc->iface); in bcm63xx_ep0_spoof_set_iface()
1368 dev_warn_ratelimited(udc->dev, in bcm63xx_ep0_spoof_set_iface()
1369 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n", in bcm63xx_ep0_spoof_set_iface()
1370 udc->iface, udc->alt_iface); in bcm63xx_ep0_spoof_set_iface()
1376 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1385 struct iudma_ch *iudma = &udc->iudma[ch_idx]; in bcm63xx_ep0_map_write()
1387 BUG_ON(udc->ep0_request); in bcm63xx_ep0_map_write()
1388 udc->ep0_request = req; in bcm63xx_ep0_map_write()
1390 req->actual = 0; in bcm63xx_ep0_map_write()
1391 breq->offset = 0; in bcm63xx_ep0_map_write()
1392 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx); in bcm63xx_ep0_map_write()
1397 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1405 req->status = status; in bcm63xx_ep0_complete()
1407 req->actual = 0; in bcm63xx_ep0_complete()
1408 if (req->complete) { in bcm63xx_ep0_complete()
1409 spin_unlock_irq(&udc->lock); in bcm63xx_ep0_complete()
1410 req->complete(&udc->bep[0].ep, req); in bcm63xx_ep0_complete()
1411 spin_lock_irq(&udc->lock); in bcm63xx_ep0_complete()
1416 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1423 struct usb_request *req = udc->ep0_reply; in bcm63xx_ep0_nuke_reply()
1425 udc->ep0_reply = NULL; in bcm63xx_ep0_nuke_reply()
1426 usb_gadget_unmap_request(&udc->gadget, req, is_tx); in bcm63xx_ep0_nuke_reply()
1427 if (udc->ep0_request == req) { in bcm63xx_ep0_nuke_reply()
1428 udc->ep0_req_completed = 0; in bcm63xx_ep0_nuke_reply()
1429 udc->ep0_request = NULL; in bcm63xx_ep0_nuke_reply()
1431 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN); in bcm63xx_ep0_nuke_reply()
1435 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1441 struct usb_request *req = udc->ep0_request; in bcm63xx_ep0_read_complete()
1443 udc->ep0_req_completed = 0; in bcm63xx_ep0_read_complete()
1444 udc->ep0_request = NULL; in bcm63xx_ep0_read_complete()
1446 return req->actual; in bcm63xx_ep0_read_complete()
1450 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1461 struct usb_request *req = &udc->ep0_ctrl_req.req; in bcm63xx_ep0_internal_request()
1463 req->buf = udc->ep0_ctrl_buf; in bcm63xx_ep0_internal_request()
1464 req->length = length; in bcm63xx_ep0_internal_request()
1465 req->complete = NULL; in bcm63xx_ep0_internal_request()
1471 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1481 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf; in bcm63xx_ep0_do_setup()
1486 dev_err(udc->dev, "missing SETUP packet\n"); in bcm63xx_ep0_do_setup()
1491 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't in bcm63xx_ep0_do_setup()
1500 dev_warn_ratelimited(udc->dev, in bcm63xx_ep0_do_setup()
1508 bcm63xx_set_stall(udc, &udc->bep[0], true); in bcm63xx_ep0_do_setup()
1512 if (!ctrl->wLength) in bcm63xx_ep0_do_setup()
1514 else if (ctrl->bRequestType & USB_DIR_IN) in bcm63xx_ep0_do_setup()
1521 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1529 * Returns 0 if work was done; -EAGAIN if nothing to do.
1533 if (udc->ep0_req_reset) { in bcm63xx_ep0_do_idle()
1534 udc->ep0_req_reset = 0; in bcm63xx_ep0_do_idle()
1535 } else if (udc->ep0_req_set_cfg) { in bcm63xx_ep0_do_idle()
1536 udc->ep0_req_set_cfg = 0; in bcm63xx_ep0_do_idle()
1538 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE; in bcm63xx_ep0_do_idle()
1539 } else if (udc->ep0_req_set_iface) { in bcm63xx_ep0_do_idle()
1540 udc->ep0_req_set_iface = 0; in bcm63xx_ep0_do_idle()
1542 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE; in bcm63xx_ep0_do_idle()
1543 } else if (udc->ep0_req_completed) { in bcm63xx_ep0_do_idle()
1544 udc->ep0state = bcm63xx_ep0_do_setup(udc); in bcm63xx_ep0_do_idle()
1545 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0; in bcm63xx_ep0_do_idle()
1546 } else if (udc->ep0_req_shutdown) { in bcm63xx_ep0_do_idle()
1547 udc->ep0_req_shutdown = 0; in bcm63xx_ep0_do_idle()
1548 udc->ep0_req_completed = 0; in bcm63xx_ep0_do_idle()
1549 udc->ep0_request = NULL; in bcm63xx_ep0_do_idle()
1550 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]); in bcm63xx_ep0_do_idle()
1551 usb_gadget_unmap_request(&udc->gadget, in bcm63xx_ep0_do_idle()
1552 &udc->ep0_ctrl_req.req, 0); in bcm63xx_ep0_do_idle()
1556 udc->ep0state = EP0_SHUTDOWN; in bcm63xx_ep0_do_idle()
1557 } else if (udc->ep0_reply) { in bcm63xx_ep0_do_idle()
1563 dev_warn(udc->dev, "nuking unexpected reply\n"); in bcm63xx_ep0_do_idle()
1566 return -EAGAIN; in bcm63xx_ep0_do_idle()
1573 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1576 * Returns 0 if work was done; -EAGAIN if nothing to do.
1580 enum bcm63xx_ep0_state ep0state = udc->ep0state; in bcm63xx_ep0_one_round()
1581 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown; in bcm63xx_ep0_one_round()
1583 switch (udc->ep0state) { in bcm63xx_ep0_one_round()
1599 * REQUEUE->IDLE. The gadget driver is NOT expected to in bcm63xx_ep0_one_round()
1602 if (udc->ep0_reply) { in bcm63xx_ep0_one_round()
1604 udc->ep0_reply); in bcm63xx_ep0_one_round()
1613 * it to finish, then go back to REQUEUE->IDLE. in bcm63xx_ep0_one_round()
1615 * Shutdown case: Reset the TX channel, send -ESHUTDOWN in bcm63xx_ep0_one_round()
1616 * completion to the gadget driver, then REQUEUE->IDLE. in bcm63xx_ep0_one_round()
1618 if (udc->ep0_req_completed) { in bcm63xx_ep0_one_round()
1619 udc->ep0_reply = NULL; in bcm63xx_ep0_one_round()
1627 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]); in bcm63xx_ep0_one_round()
1635 if (udc->ep0_reply) { in bcm63xx_ep0_one_round()
1637 udc->ep0_reply); in bcm63xx_ep0_one_round()
1645 if (udc->ep0_req_completed) { in bcm63xx_ep0_one_round()
1646 udc->ep0_reply = NULL; in bcm63xx_ep0_one_round()
1649 /* send 0-byte ack to host */ in bcm63xx_ep0_one_round()
1653 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]); in bcm63xx_ep0_one_round()
1661 * Normal case: 0-byte OUT ack packet is in flight; wait in bcm63xx_ep0_one_round()
1662 * for it to finish, then go back to REQUEUE->IDLE. in bcm63xx_ep0_one_round()
1666 * function anyway. Then go back to REQUEUE->IDLE. in bcm63xx_ep0_one_round()
1668 if (udc->ep0_req_completed) { in bcm63xx_ep0_one_round()
1672 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]); in bcm63xx_ep0_one_round()
1673 udc->ep0_request = NULL; in bcm63xx_ep0_one_round()
1680 * waiting for the gadget driver to send a 0-byte reply. in bcm63xx_ep0_one_round()
1692 struct usb_request *r = udc->ep0_reply; in bcm63xx_ep0_one_round()
1701 udc->ep0_reply = NULL; in bcm63xx_ep0_one_round()
1709 if (udc->ep0state == ep0state) in bcm63xx_ep0_one_round()
1710 return -EAGAIN; in bcm63xx_ep0_one_round()
1712 udc->ep0state = ep0state; in bcm63xx_ep0_one_round()
1717 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1722 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1733 spin_lock_irq(&udc->lock); in bcm63xx_ep0_process()
1736 spin_unlock_irq(&udc->lock); in bcm63xx_ep0_process()
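A minimal sketch of the worker loop implied by the 0 / -EAGAIN convention described above; the container_of() access to the ep0_wq work item is an assumption:

    static void bcm63xx_ep0_process(struct work_struct *w)
    {
            struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);

            spin_lock_irq(&udc->lock);
            while (bcm63xx_ep0_one_round(udc) == 0)
                    ;       /* keep advancing the state machine until nothing is left to do */
            spin_unlock_irq(&udc->lock);
    }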
1744 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1756 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1758 * @is_on: 0 to disable pullup, 1 to enable.
1766 int i, rc = -EINVAL; in bcm63xx_udc_pullup()
1768 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_pullup()
1769 if (is_on && udc->ep0state == EP0_SHUTDOWN) { in bcm63xx_udc_pullup()
1770 udc->gadget.speed = USB_SPEED_UNKNOWN; in bcm63xx_udc_pullup()
1771 udc->ep0state = EP0_REQUEUE; in bcm63xx_udc_pullup()
1776 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP); in bcm63xx_udc_pullup()
1778 bcm63xx_set_stall(udc, &udc->bep[i], false); in bcm63xx_udc_pullup()
1783 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) { in bcm63xx_udc_pullup()
1786 udc->ep0_req_shutdown = 1; in bcm63xx_udc_pullup()
1787 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_pullup()
1790 schedule_work(&udc->ep0_wq); in bcm63xx_udc_pullup()
1791 if (udc->ep0state == EP0_SHUTDOWN) in bcm63xx_udc_pullup()
1796 cancel_work_sync(&udc->ep0_wq); in bcm63xx_udc_pullup()
1800 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_pullup()
1805 * bcm63xx_udc_start - Start the controller.
1815 if (!driver || driver->max_speed < USB_SPEED_HIGH || in bcm63xx_udc_start()
1816 !driver->setup) in bcm63xx_udc_start()
1817 return -EINVAL; in bcm63xx_udc_start()
1819 return -ENODEV; in bcm63xx_udc_start()
1820 if (udc->driver) in bcm63xx_udc_start()
1821 return -EBUSY; in bcm63xx_udc_start()
1823 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_start()
1832 udc->driver = driver; in bcm63xx_udc_start()
1833 udc->gadget.dev.of_node = udc->dev->of_node; in bcm63xx_udc_start()
1835 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_start()
1841 * bcm63xx_udc_stop - Shut down the controller.
1850 spin_lock_irqsave(&udc->lock, flags); in bcm63xx_udc_stop()
1852 udc->driver = NULL; in bcm63xx_udc_stop()
1858 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling... in bcm63xx_udc_stop()
1865 spin_unlock_irqrestore(&udc->lock, flags); in bcm63xx_udc_stop()
1872 .pullup = bcm63xx_udc_pullup,
1882 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1894 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT; in bcm63xx_update_cfg_iface()
1895 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT; in bcm63xx_update_cfg_iface()
1896 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >> in bcm63xx_update_cfg_iface()
1902 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1911 enum usb_device_speed oldspeed = udc->gadget.speed; in bcm63xx_update_link_speed()
1915 udc->gadget.speed = USB_SPEED_HIGH; in bcm63xx_update_link_speed()
1918 udc->gadget.speed = USB_SPEED_FULL; in bcm63xx_update_link_speed()
1922 udc->gadget.speed = USB_SPEED_UNKNOWN; in bcm63xx_update_link_speed()
1923 dev_err(udc->dev, in bcm63xx_update_link_speed()
1928 if (udc->gadget.speed != oldspeed) { in bcm63xx_update_link_speed()
1929 dev_info(udc->dev, "link up, %s-speed mode\n", in bcm63xx_update_link_speed()
1930 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full"); in bcm63xx_update_link_speed()
1938 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1952 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) { in bcm63xx_update_wedge()
1953 bcm63xx_set_stall(udc, &udc->bep[i], new_status); in bcm63xx_update_wedge()
1955 clear_bit(i, &udc->wedgemap); in bcm63xx_update_wedge()
1960 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1978 spin_lock(&udc->lock); in bcm63xx_udc_ctrl_isr()
1984 udc->gadget.speed != USB_SPEED_UNKNOWN) in bcm63xx_udc_ctrl_isr()
1985 dev_info(udc->dev, "link down\n"); in bcm63xx_udc_ctrl_isr()
1987 udc->gadget.speed = USB_SPEED_UNKNOWN; in bcm63xx_udc_ctrl_isr()
1997 udc->ep0_req_reset = 1; in bcm63xx_udc_ctrl_isr()
1998 schedule_work(&udc->ep0_wq); in bcm63xx_udc_ctrl_isr()
2010 udc->ep0_req_set_cfg = 1; in bcm63xx_udc_ctrl_isr()
2011 schedule_work(&udc->ep0_wq); in bcm63xx_udc_ctrl_isr()
2015 udc->ep0_req_set_iface = 1; in bcm63xx_udc_ctrl_isr()
2016 schedule_work(&udc->ep0_wq); in bcm63xx_udc_ctrl_isr()
2018 spin_unlock(&udc->lock); in bcm63xx_udc_ctrl_isr()
2020 if (disconnected && udc->driver) in bcm63xx_udc_ctrl_isr()
2021 udc->driver->disconnect(&udc->gadget); in bcm63xx_udc_ctrl_isr()
2022 else if (bus_reset && udc->driver) in bcm63xx_udc_ctrl_isr()
2023 usb_gadget_udc_reset(&udc->gadget, udc->driver); in bcm63xx_udc_ctrl_isr()
2029 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2041 struct bcm63xx_udc *udc = iudma->udc; in bcm63xx_udc_data_isr()
2048 spin_lock(&udc->lock); in bcm63xx_udc_data_isr()
2051 ENETDMAC_IR_REG, iudma->ch_idx); in bcm63xx_udc_data_isr()
2052 bep = iudma->bep; in bcm63xx_udc_data_isr()
2056 if (iudma->ch_idx == IUDMA_EP0_RXCHAN || in bcm63xx_udc_data_isr()
2057 iudma->ch_idx == IUDMA_EP0_TXCHAN) { in bcm63xx_udc_data_isr()
2058 req = udc->ep0_request; in bcm63xx_udc_data_isr()
2063 req->actual += rc; in bcm63xx_udc_data_isr()
2065 if (req->actual >= req->length || breq->bd_bytes > rc) { in bcm63xx_udc_data_isr()
2066 udc->ep0_req_completed = 1; in bcm63xx_udc_data_isr()
2068 schedule_work(&udc->ep0_wq); in bcm63xx_udc_data_isr()
2071 req->actual = min(req->actual, req->length); in bcm63xx_udc_data_isr()
2077 } else if (!list_empty(&bep->queue)) { in bcm63xx_udc_data_isr()
2078 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue); in bcm63xx_udc_data_isr()
2079 req = &breq->req; in bcm63xx_udc_data_isr()
2082 req->actual += rc; in bcm63xx_udc_data_isr()
2084 if (req->actual >= req->length || breq->bd_bytes > rc) { in bcm63xx_udc_data_isr()
2086 list_del(&breq->queue); in bcm63xx_udc_data_isr()
2088 req->actual = min(req->actual, req->length); in bcm63xx_udc_data_isr()
2090 if (!list_empty(&bep->queue)) { in bcm63xx_udc_data_isr()
2093 next = list_first_entry(&bep->queue, in bcm63xx_udc_data_isr()
2102 spin_unlock(&udc->lock); in bcm63xx_udc_data_isr()
2105 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx); in bcm63xx_udc_data_isr()
2106 if (req->complete) in bcm63xx_udc_data_isr()
2107 req->complete(&bep->ep, req); in bcm63xx_udc_data_isr()
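What a gadget driver's completion callback sees when this ISR hands a request back (a sketch; my_complete() is illustrative):

    static void my_complete(struct usb_ep *ep, struct usb_request *req)
    {
            if (req->status == -ESHUTDOWN)
                    return;         /* endpoint disabled or controller shut down */
            if (req->status)
                    pr_debug("transfer failed: %d\n", req->status);
            /* on success, req->actual (clamped to req->length) bytes were transferred */
    }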
2118 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2126 struct bcm63xx_udc *udc = s->private; in bcm63xx_usbd_dbg_show()
2128 if (!udc->driver) in bcm63xx_usbd_dbg_show()
2129 return -ENODEV; in bcm63xx_usbd_dbg_show()
2132 bcm63xx_ep0_state_names[udc->ep0state]); in bcm63xx_usbd_dbg_show()
2134 udc->ep0_req_reset ? "reset " : "", in bcm63xx_usbd_dbg_show()
2135 udc->ep0_req_set_cfg ? "set_cfg " : "", in bcm63xx_usbd_dbg_show()
2136 udc->ep0_req_set_iface ? "set_iface " : "", in bcm63xx_usbd_dbg_show()
2137 udc->ep0_req_shutdown ? "shutdown " : "", in bcm63xx_usbd_dbg_show()
2138 udc->ep0_request ? "pending " : "", in bcm63xx_usbd_dbg_show()
2139 udc->ep0_req_completed ? "completed " : "", in bcm63xx_usbd_dbg_show()
2140 udc->ep0_reply ? "reply " : ""); in bcm63xx_usbd_dbg_show()
2142 udc->cfg, udc->iface, udc->alt_iface); in bcm63xx_usbd_dbg_show()
2157 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2165 struct bcm63xx_udc *udc = s->private; in bcm63xx_iudma_dbg_show()
2169 if (!udc->driver) in bcm63xx_iudma_dbg_show()
2170 return -ENODEV; in bcm63xx_iudma_dbg_show()
2173 struct iudma_ch *iudma = &udc->iudma[ch_idx]; in bcm63xx_iudma_dbg_show()
2175 seq_printf(s, "IUDMA channel %d -- ", ch_idx); in bcm63xx_iudma_dbg_show()
2203 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used, in bcm63xx_iudma_dbg_show()
2204 iudma->n_bds); in bcm63xx_iudma_dbg_show()
2206 if (iudma->bep) in bcm63xx_iudma_dbg_show()
2207 seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue)); in bcm63xx_iudma_dbg_show()
2211 for (i = 0; i < iudma->n_bds; i++) { in bcm63xx_iudma_dbg_show()
2212 struct bcm_enet_desc *d = &iudma->bd_ring[i]; in bcm63xx_iudma_dbg_show()
2216 d->len_stat >> 16, d->len_stat & 0xffff, in bcm63xx_iudma_dbg_show()
2217 d->address); in bcm63xx_iudma_dbg_show()
2218 if (d == iudma->read_bd) in bcm63xx_iudma_dbg_show()
2220 if (d == iudma->write_bd) in bcm63xx_iudma_dbg_show()
2233 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2243 root = debugfs_create_dir(udc->gadget.name, usb_debug_root); in bcm63xx_udc_init_debugfs()
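A sketch of the usual way seq_file show routines like the two above get wired into the directory created here; the file names and mode bits are assumptions:

    DEFINE_SHOW_ATTRIBUTE(bcm63xx_usbd_dbg);        /* generates bcm63xx_usbd_dbg_fops */
    DEFINE_SHOW_ATTRIBUTE(bcm63xx_iudma_dbg);

    /* inside bcm63xx_udc_init_debugfs(), after debugfs_create_dir(): */
    debugfs_create_file("usbd", 0400, root, udc, &bcm63xx_usbd_dbg_fops);
    debugfs_create_file("iudma", 0400, root, udc, &bcm63xx_iudma_dbg_fops);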
2249 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2256 debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root); in bcm63xx_udc_cleanup_debugfs()
2264 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2272 struct device *dev = &pdev->dev; in bcm63xx_udc_probe()
2275 int rc = -ENOMEM, i, irq; in bcm63xx_udc_probe()
2279 return -ENOMEM; in bcm63xx_udc_probe()
2282 udc->dev = dev; in bcm63xx_udc_probe()
2283 udc->pd = pd; in bcm63xx_udc_probe()
2287 return -EINVAL; in bcm63xx_udc_probe()
2290 udc->usbd_regs = devm_platform_ioremap_resource(pdev, 0); in bcm63xx_udc_probe()
2291 if (IS_ERR(udc->usbd_regs)) in bcm63xx_udc_probe()
2292 return PTR_ERR(udc->usbd_regs); in bcm63xx_udc_probe()
2294 udc->iudma_regs = devm_platform_ioremap_resource(pdev, 1); in bcm63xx_udc_probe()
2295 if (IS_ERR(udc->iudma_regs)) in bcm63xx_udc_probe()
2296 return PTR_ERR(udc->iudma_regs); in bcm63xx_udc_probe()
2298 spin_lock_init(&udc->lock); in bcm63xx_udc_probe()
2299 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process); in bcm63xx_udc_probe()
2301 udc->gadget.ops = &bcm63xx_udc_ops; in bcm63xx_udc_probe()
2302 udc->gadget.name = dev_name(dev); in bcm63xx_udc_probe()
2304 if (!pd->use_fullspeed && !use_fullspeed) in bcm63xx_udc_probe()
2305 udc->gadget.max_speed = USB_SPEED_HIGH; in bcm63xx_udc_probe()
2307 udc->gadget.max_speed = USB_SPEED_FULL; in bcm63xx_udc_probe()
2314 rc = -ENXIO; in bcm63xx_udc_probe()
2326 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */ in bcm63xx_udc_probe()
2334 dev_name(dev), &udc->iudma[i]) < 0) in bcm63xx_udc_probe()
2339 rc = usb_add_gadget_udc(dev, &udc->gadget); in bcm63xx_udc_probe()
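A sketch of the IRQ wiring the probe fragments above refer to: one control interrupt for the USBD core plus one data interrupt per IUDMA channel. The request_udc_irqs() wrapper, the flags, and the error handling are assumptions:

    static int request_udc_irqs(struct platform_device *pdev, struct bcm63xx_udc *udc)
    {
            struct device *dev = &pdev->dev;
            int i, irq;

            /* IRQ resource #0: USBD control interrupt */
            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;
            if (devm_request_irq(dev, irq, bcm63xx_udc_ctrl_isr, 0,
                                 dev_name(dev), udc))
                    return -ENXIO;

            /* IRQ resources #1-6: one data interrupt per IUDMA channel */
            for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
                    irq = platform_get_irq(pdev, i + 1);
                    if (irq < 0)
                            return irq;
                    if (devm_request_irq(dev, irq, bcm63xx_udc_data_isr, 0,
                                         dev_name(dev), &udc->iudma[i]))
                            return -ENXIO;
            }
            return 0;
    }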
2354 * bcm63xx_udc_remove - Remove the device from the system.
2362 usb_del_gadget_udc(&udc->gadget); in bcm63xx_udc_remove()
2363 BUG_ON(udc->driver); in bcm63xx_udc_remove()