Lines matching "generic" and "xhci" in drivers/usb/host/xhci-ring.c

// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 ...
 * until you reach a non-link TRB.
 ...
 */

#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		...

/* in xhci_trb_virt_to_dma() */
	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	...
	return seg->dma + (segment_offset * sizeof(*trb));
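
The arithmetic above is the whole trick: a segment's TRBs live in one DMA-coherent allocation, so a TRB's bus address is the segment's base DMA address plus the TRB index times sizeof(TRB). Below is a minimal standalone sketch of the same math (editor's illustration with invented toy_* stand-in types; not code from xhci-ring.c):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_TRBS_PER_SEGMENT 256

/* Toy stand-ins for the kernel structures (illustrative only). */
union toy_trb { uint32_t field[4]; };		/* a TRB is 16 bytes */

struct toy_segment {
	union toy_trb trbs[TOY_TRBS_PER_SEGMENT];
	uint64_t dma;				/* bus address of trbs[0] */
};

/* Mirrors the pointer arithmetic in xhci_trb_virt_to_dma(). */
static uint64_t toy_trb_virt_to_dma(struct toy_segment *seg, union toy_trb *trb)
{
	unsigned long off;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	off = trb - seg->trbs;			/* offset in TRBs, not bytes */
	if (off >= TOY_TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + off * sizeof(*trb);
}

int main(void)
{
	static struct toy_segment seg = { .dma = 0x1000 };

	/* TRB #3 lives 3 * 16 bytes past the segment base. */
	assert(toy_trb_virt_to_dma(&seg, &seg.trbs[3]) == 0x1000 + 3 * 16);
	printf("ok\n");
	return 0;
}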

/* in trb_is_noop() */
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);

/* in trb_is_link() */
	return TRB_TYPE_LINK_LE32(trb->link.control);

/* in last_trb_on_seg() */
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];

/* in last_trb_on_ring() */
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);

/* in link_trb_toggles_cycle() */
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;

/* in last_td_in_urb() */
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;

/* in inc_td_cnt() */
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;

/* in trb_to_noop() */
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
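
These helpers serve the ring's cycle-bit ownership protocol: the producer stamps each TRB with its current cycle state, the consumer owns an entry only while its cycle bit matches the consumer's own state, and each side flips its state when it wraps. A toy userspace model of that protocol (editor's illustration; produce()/consume() and the one-word "TRB" are invented for the sketch):

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define CYCLE_BIT 0x1u

static uint32_t ring[RING_SIZE];
static unsigned int enq, deq;
static uint32_t prod_cycle = 1, cons_cycle = 1;

/* Producer: stamp the entry with the current producer cycle state. */
static void produce(uint32_t payload)
{
	ring[enq] = (payload << 1) | prod_cycle;
	if (++enq == RING_SIZE) {	/* wrap: flip producer cycle */
		enq = 0;
		prod_cycle ^= CYCLE_BIT;
	}
}

/* Consumer: an entry is ours only while its cycle bit matches ours. */
static int consume(uint32_t *payload)
{
	if ((ring[deq] & CYCLE_BIT) != cons_cycle)
		return 0;		/* not handed over yet */
	*payload = ring[deq] >> 1;
	if (++deq == RING_SIZE) {	/* wrap: flip consumer cycle */
		deq = 0;
		cons_cycle ^= CYCLE_BIT;
	}
	return 1;
}

int main(void)
{
	uint32_t v;
	int i;

	for (i = 1; i <= 3; i++)
		produce(i);
	while (consume(&v))
		printf("got %u\n", v);	/* 1 2 3 */
	for (i = 4; i <= 6; i++)
		produce(i);		/* wraps the ring once */
	while (consume(&v))
		printf("got %u\n", v);	/* 4 5 6 */
	return 0;
}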

static void next_trb(struct xhci_hcd *xhci,
		...
	if (trb_is_link(*trb) || last_trb_on_seg(*seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);

void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	...
	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			...
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		...
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue))
			xhci_warn(xhci, "Missing link TRB at end of segment\n");
		else
			ring->dequeue++;
	}

	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			...
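
The while loop above hops over link TRBs and gives up after more hops than the ring has segments, which catches a corrupted segment list that loops forever. The same advance-and-guard idea in a standalone toy (editor's sketch; advance() and the toy segment type are invented):

#include <stdio.h>

#define TRBS_PER_SEG 4		/* last slot plays the link TRB */
#define NUM_SEGS 3

struct seg {
	struct seg *next;
	int id;
};

/* Advance a (segment, index) position, skipping "link TRBs" with a guard. */
static int advance(struct seg **s, int *idx)
{
	int hops = 0;

	(*idx)++;
	while (*idx == TRBS_PER_SEG - 1) {	/* landed on a link TRB */
		*s = (*s)->next;
		*idx = 0;
		if (hops++ > NUM_SEGS) {
			fprintf(stderr, "endless link TRB loop\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct seg segs[NUM_SEGS];
	struct seg *s = &segs[0];
	int idx = 0, i;

	for (i = 0; i < NUM_SEGS; i++) {
		segs[i].id = i;
		segs[i].next = &segs[(i + 1) % NUM_SEGS];
	}
	for (i = 0; i < 8; i++) {
		if (advance(&s, &idx))
			return 1;
		printf("deq at seg %d idx %d\n", s->id, idx);
	}
	return 0;
}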

/*
 ...
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 ...
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	...
	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;

	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
		return;
	}

	next = ++(ring->enqueue);
	...
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		...
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		...
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
			...

static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *enq_seg = ring->enq_seg;
	union xhci_trb *enq = ring->enqueue;
	...
	/* Ring might be empty even if enq != deq if enq is left on a link trb */
	if (trb_is_link(enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Empty ring, common case, don't walk the segments */
	if (enq == ring->dequeue)
		return ring->num_segs * (TRBS_PER_SEGMENT - 1);

	do {
		if (ring->deq_seg == enq_seg && ring->dequeue >= enq)
			return free + (ring->dequeue - enq);
		last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
		free += last_on_seg - enq;
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	} while (i++ <= ring->num_segs);
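
Only TRBS_PER_SEGMENT - 1 entries of each segment are usable (the last slot is the link TRB), so the walk above counts from enqueue up to each segment's link TRB until it meets the dequeue position. The same counting, reduced to plain index arithmetic (editor's sketch; free_trbs() is invented and uses a toy segment size):

#include <stdio.h>

#define TOY_TRBS_PER_SEGMENT 16	/* toy size; real rings use 256 */

/* Count free slots from (enq_seg, enq) forward to (deq_seg, deq). */
static unsigned int free_trbs(unsigned int num_segs,
			      unsigned int enq_seg, unsigned int enq,
			      unsigned int deq_seg, unsigned int deq)
{
	unsigned int free = 0, i;

	if (enq_seg == deq_seg && enq == deq)	/* empty ring */
		return num_segs * (TOY_TRBS_PER_SEGMENT - 1);

	for (i = 0; i <= num_segs; i++) {
		if (deq_seg == enq_seg && deq >= enq)
			return free + (deq - enq);
		/* usable slots up to this segment's link TRB */
		free += (TOY_TRBS_PER_SEGMENT - 1) - enq;
		enq_seg = (enq_seg + 1) % num_segs;
		enq = 0;
	}
	return free;
}

int main(void)
{
	/* 2 segments; enq at (0, 10), deq at (1, 3): 5 + 3 = 8 free. */
	printf("%u\n", free_trbs(2, 0, 10, 1, 3));
	return 0;
}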

static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhci_ring *ring,
					       unsigned int num_trbs)
{
	...
	enq_used = ring->enqueue - ring->enq_seg->trbs;

	/* how many trbs will be queued past the enqueue segment? */
	trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
	...
	/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
	if (trb_is_link(ring->enqueue) && ring->enq_seg->next->trbs == ring->dequeue)
		return 0;

	new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
	seg = ring->enq_seg;
	...
		seg = seg->next;
		if (seg == ring->deq_seg) {
			xhci_dbg(xhci, "Ring expansion by %d segments needed\n",
				 new_segs);
			xhci_dbg(xhci, "Adding %d trbs moves enq %d trbs into deq seg\n",
				 num_trbs, trbs_past_seg % (TRBS_PER_SEGMENT - 1));
			return new_segs;
		}
		new_segs--;
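
Concretely, with 255 usable TRBs per segment: if enqueue sits 250 TRBs into its segment and 10 more TRBs are about to be queued, trbs_past_seg = 250 + 10 - 255 = 5 and new_segs = 1 + 5/255 = 1. The same numbers as a standalone snippet (editor's illustration only):

#include <stdio.h>

#define TRBS_PER_SEGMENT 256	/* 255 usable; the last slot is the link TRB */

int main(void)
{
	int enq_used = 250;	/* enqueue is 250 TRBs into its segment */
	int num_trbs = 10;	/* about to be queued */
	int trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);

	if (trbs_past_seg >= 0)
		printf("may need %d new segment(s)\n",
		       1 + trbs_past_seg / (TRBS_PER_SEGMENT - 1));
	else
		printf("fits in the current segment\n");
	return 0;
}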

void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");
	...
	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer,
			msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 ...
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	...
	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci);
		xhci_ring_cmd_db(xhci);
	}

/* Must be called with xhci->lock held, releases and acquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
	union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
	...
	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);
	...
	next_trb(xhci, NULL, &new_seg, &new_deq);
	if (trb_is_link(new_deq))
		next_trb(xhci, NULL, &new_seg, &new_deq);

	crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then driver handles it as if host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			     CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	...
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
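
The abort path is a handshake: write CMD_RING_ABORT, then poll the command ring register until the running bit clears, treating a poll timeout as a dead host. A toy version of that bounded-polling pattern (editor's sketch; read_reg(), the fake register, and the spin counts are invented — the real driver's xhci_handshake() polls with microsecond timeouts):

#include <errno.h>
#include <stdio.h>

#define CMD_RING_RUNNING (1u << 3)	/* toy bit, for the sketch only */

static unsigned int fake_reg = CMD_RING_RUNNING;
static int spins_until_stop = 100;	/* pretend HW stops after 100 polls */

static unsigned int read_reg(void)
{
	if (--spins_until_stop <= 0)
		fake_reg &= ~CMD_RING_RUNNING;
	return fake_reg;
}

/* Poll until (reg & mask) == expected, or give up after max_spins. */
static int handshake(unsigned int mask, unsigned int expected, int max_spins)
{
	while (max_spins--) {
		if ((read_reg() & mask) == expected)
			return 0;
	}
	return -ETIMEDOUT;
}

int main(void)
{
	int ret = handshake(CMD_RING_RUNNING, 0, 1000);

	printf(ret ? "abort failed to stop command ring\n"
		   : "command ring stopped\n");
	return ret ? 1 : 0;
}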

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index, unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;
	...

static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index)
{
	...
	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;

		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index)
{
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index)
{
	...
		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
	...
		xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
	...
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
		...
	}
	return &xhci->devs[slot_id]->eps[ep_index];
}

static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep, unsigned int stream_id)
{
	/* common case, no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (!ep->stream_info)
		return NULL;

	if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
		xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
			  stream_id, ep->vdev->slot_id, ep->ep_index);
		return NULL;
	}

	return ep->stream_info->stream_rings[stream_id];
}

struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index, unsigned int stream_id)
{
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	...
	return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}

static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *td)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	...
	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
			  stream_id);
		return -ENODEV;
	}
	...
	if (list_empty(&ep_ring->td_list)) {
		new_seg = ep_ring->enq_seg;
		new_deq = ep_ring->enqueue;
		new_cycle = ep_ring->cycle_state;
		xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
		...
	}

	if (!td) {
		xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
		return -EINVAL;
	}

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;
	...
		if (new_deq == td->last_trb)
			td_last_trb_found = true;
		...
		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			return -EINVAL;
		}
	...
		xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
		xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
		return -EINVAL;
	}

	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
			  &addr);
		return -EBUSY;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
		return -ENOMEM;
	}
	...
	ret = queue_command(xhci, cmd,
		...
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return ret;
	}

	ep->queued_deq_seg = new_seg;
	ep->queued_deq_ptr = new_deq;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       ...
	ep->ep_state |= SET_DEQ_PENDING;
	xhci_ring_cmd_db(xhci);

static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;
	...
		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				...
	...

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;
	...
	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	/* for in transfers we need to copy the data from bounce to sg */
	if (urb->num_sgs) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
					   seg->bounce_len, seg->bounce_offs);
		if (len != seg->bounce_len)
			xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
				  len, seg->bounce_len);
	} else {
		memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
		       seg->bounce_len);
	}
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}
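
For unaligned IN transfers the controller writes into one contiguous bounce buffer, and on completion the data must be copied back into the URB's scattered fragments; the driver compares the copied length against bounce_len exactly as the warning above does. A toy copy-back (editor's sketch; struct fragment and copy_back() stand in for the scatterlist and sg_pcopy_from_buffer()):

#include <stdio.h>
#include <string.h>

struct fragment {
	char *buf;
	size_t len;
};

/* Spread a contiguous bounce buffer back over scattered fragments. */
static size_t copy_back(struct fragment *frags, int nfrags,
			const char *bounce, size_t bounce_len)
{
	size_t done = 0;
	int i;

	for (i = 0; i < nfrags && done < bounce_len; i++) {
		size_t n = frags[i].len;

		if (n > bounce_len - done)
			n = bounce_len - done;
		memcpy(frags[i].buf, bounce + done, n);
		done += n;
	}
	return done;	/* caller checks this against bounce_len */
}

int main(void)
{
	char a[4] = { 0 }, b[6] = { 0 };
	struct fragment frags[] = { { a, 3 }, { b, 5 } };
	const char bounce[] = "HELLOXYZ";
	size_t len = copy_back(frags, 2, bounce, 8);

	printf("copied %zu bytes: '%.3s' + '%.5s'\n", len, a, b);
	return len == 8 ? 0 : 1;
}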

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
			   struct xhci_ring *ep_ring, int status)
{
	...
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned).  Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		status = 0;
	}
	/* TD might be removed from td_list if we are giving back a cancelled URB */
	if (!list_empty(&td->td_list))
		list_del_init(&td->td_list);
	/* Giving back a cancelled URB, or if a slated TD completed anyway */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			status = 0;

		xhci_giveback_urb_in_irq(xhci, td, status);
	}

/* in xhci_giveback_invalidated_tds() */
	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {
		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

		if (td->cancel_status == TD_CLEARED) {
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
			return;
	}

static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command) {
		ret = -ENOMEM;
		...
	}
	xhci_dbg(xhci, "%s-reset ep %u, slot %u\n", ...);
	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
	if (ret)
		xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n", ...);

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep, struct xhci_td *td,
		enum xhci_ep_reset_type reset_type)
{
	unsigned int slot_id = ep->vdev->slot_id;
	...
	if (ep->vdev->flags & VDEV_PORT_ERROR)
		return -ENODEV;

	/* add td to cancelled list and let reset ep handler take care of it */
	if (reset_type == EP_HARD_RESET) {
		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
		if (td && list_empty(&td->cancelled_td_list)) {
			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
			td->cancel_status = TD_HALTED;
		}
	}

	if (ep->ep_state & EP_HALTED) {
		xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
			 ep->ep_index);
		return 0;
	}

	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
	...
	ep->ep_state |= EP_HALTED;

	xhci_ring_cmd_db(xhci);

/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 */
static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
	struct xhci_hcd *xhci;
	...
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	/*
	 * This is not going to work if the hardware is changing its dequeue
	 * pointers as we look at them. Completion handler will call us later.
	 */
	if (ep->ep_state & SET_DEQ_PENDING)
		return 0;

	xhci = ep->xhci;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
			       (unsigned long long)xhci_trb_virt_to_dma(
				       td->start_seg, td->first_trb),
			       td->urb->stream_id, td->urb);
		list_del_init(&td->td_list);
		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
		if (!ring) {
			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
				  td->urb, td->urb->stream_id);
			continue;
		}
		...
		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
					 td->urb->stream_id);
		hw_deq &= ~0xf;

		if (td->cancel_status == TD_HALTED ||
		    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
			switch (td->cancel_status) {
			case TD_CLEARED: /* TD is already no-op */
				...
				if (cached_td) {
					if (cached_td->urb->stream_id != td->urb->stream_id) {
						/* Multiple streams case, defer move dq */
						xhci_dbg(xhci,
							 "Move dq deferred: stream %u URB %p\n",
							 td->urb->stream_id, td->urb);
						td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
						break;
					}
					/* Should never happen, but clear the TD if it does */
					xhci_warn(xhci,
						  "Found multiple active URBs %p and %p in stream %u?\n",
						  td->urb, cached_td->urb,
						  td->urb->stream_id);
					td_to_noop(xhci, ring, cached_td, false);
					cached_td->cancel_status = TD_CLEARED;
				}
				td_to_noop(xhci, ring, td, false);
				td->cancel_status = TD_CLEARING_CACHE;
				cached_td = td;
				break;
			}
		} else {
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}
	...
	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
					cached_td->urb->stream_id,
					cached_td);
	if (err) {
		/* Failed to move past cached td, just set cached TDs to no-op */
		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
			...
			if (td->cancel_status != TD_CLEARING_CACHE &&
			    td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
				continue;
			xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
				  td->urb);
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}

/*
 ...
 * Call under xhci->lock on a stopped endpoint.
 */
...

/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
/* in find_halted_td() */
	if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
		hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
		...
		td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
		if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
			      td->last_trb, hw_deq, false))
			return td;
	}

/*
 ...
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *    bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
				    union xhci_trb *trb, u32 comp_code)
{
	...
	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
				  slot_id);
		return;
	}

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	...
		/*
		 * The proper error code is unknown here: it would be -EPIPE if the
		 * device side of the endpoint halted (aka STALL), and -EPROTO if not
		 * (transaction error). We use -EPROTO; if the device is stalled it
		 * should return a stall error on the next transfer, which then will
		 * return -EPIPE, and the device-side stall is noted and cleared by
		 * the class driver.
		 */
		switch (GET_EP_CTX_STATE(ep_ctx)) {
		case EP_STATE_HALTED:
			xhci_dbg(xhci, "Stop ep completion raced with stall\n");
			...
			if (ep->ep_state & EP_HALTED)
				goto reset_done;

			if (ep->ep_state & EP_HAS_STREAMS) {
				reset_type = EP_SOFT_RESET;
			} else {
				reset_type = EP_HARD_RESET;
				td = find_halted_td(ep);
				if (td)
					td->status = -EPROTO;
			}
			/* reset ep, reset handler cleans up cancelled tds */
			err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
			xhci_dbg(xhci, "Stop ep completion resetting ep, status %d\n", err);
			if (err)
				break;
reset_done:
			ep->ep_state &= ~EP_STOP_CMD_PENDING;
			return;
		case EP_STATE_STOPPED:
			/*
			 * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
			 * EP is a Context State Error, and EP stays Stopped.
			 ...
			 */
			if (ep->ep_state & EP_HALTED)
				break;
			...
		case EP_STATE_RUNNING:
			/* Race, HW handled stop ep cmd before ep was running */
			xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
				 GET_EP_CTX_STATE(ep_ctx));
			...
			if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
				break;

			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
			if (!command) {
				ep->ep_state &= ~EP_STOP_CMD_PENDING;
				return;
			}
			xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
			xhci_ring_cmd_db(xhci);
			return;
		...
		}
	...
	ep->ep_state &= ~EP_STOP_CMD_PENDING;
	...
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	...
	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}
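
The list_for_each_entry_safe() walk above may give back (and thereby free) the node it is standing on, so the iterator caches the next pointer before the loop body runs. The same idiom on a plain singly-linked list (editor's sketch, invented types):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

int main(void)
{
	struct node *head = NULL, *n, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* "safe" walk: grab ->next before the node is freed */
	for (n = head; n; n = tmp) {
		tmp = n->next;
		printf("giving back node %d\n", n->id);
		free(n);
	}
	return 0;
}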

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
		int slot_id, int ep_index)
{
	...
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	if ((ep->ep_state & EP_HAS_STREAMS) ||
	    (ep->ep_state & EP_GETTING_NO_STREAMS)) {
		...
		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
				stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			...
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
					...
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		...
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				...
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
			cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);
		...
		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);

/*
 ...
 * Call with xhci->lock held.
 * lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	...
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
		usb_hc_died(xhci_to_hcd(xhci));
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	...
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop.  So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
					dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
			break;
		}
	}
}

static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
		union xhci_trb *trb, u32 cmd_comp_code)
{
	...
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
			  stream_id);
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
	...
	if (cmd_comp_code != COMP_SUCCESS) {
		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			...
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			...
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				       ...);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
				  slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
				  cmd_comp_code);
			break;
		}
	} else {
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;

			/*
			 * Cadence xHCI controllers store some endpoint state
			 * information within the Rsvd0 fields of the stream
			 * endpoint context, which is not cleared by the Set TR
			 * Dequeue Pointer command, so the driver must clear it.
			 */
			if (xhci->quirks & XHCI_CDNS_SCTX_QUIRK) {
				ctx->reserved[0] = 0;
				ctx->reserved[1] = 0;
			}
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       ...);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, ep->vdev,
							   ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}

	/* HW cached TDs cleared from cache, give them back */
	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {
		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
		if (td->cancel_status == TD_CLEARING_CACHE) {
			td->cancel_status = TD_CLEARED;
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
	}
cleanup:
	ep->ep_state &= ~SET_DEQ_PENDING;
	ep->queued_deq_seg = NULL;
	ep->queued_deq_ptr = NULL;

	/* Check for deferred or newly cancelled TDs */
	if (!list_empty(&ep->cancelled_td_list)) {
		xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
			 __func__);
		xhci_invalidate_cancelled_tds(ep);
		/* Try to restart the endpoint if all is done */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		...
	} else {
		/* Restart any rings with pending URBs */
		xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
				     union xhci_trb *trb, u32 cmd_comp_code)
{
	...
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	...
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		       ...);
	...
	/* Clear our internal halted state */
	ep->ep_state &= ~EP_HALTED;
	...
	/* if this was a soft reset, then restart */
	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
		struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	...
	virt_dev = xhci->devs[slot_id];
	...
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	...
	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
		u32 cmd_comp_code)
{
	...
	virt_dev = xhci->devs[slot_id];
	...
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	...
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	...
	vdev = xhci->devs[slot_id];
	...
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	...
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
	...
	vdev = xhci->devs[slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
			  slot_id);
		return;
	}
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	...
	xhci_dbg(xhci, "Completed reset device command.\n");
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "NEC firmware version %2x.%02x",
		       NEC_FW_MAJOR(le32_to_cpu(event->status)),
		       NEC_FW_MINOR(le32_to_cpu(event->status)));
}

/* in xhci_complete_del_and_free_cmd() */
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	}
	...

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	...
	xhci->current_cmd = NULL;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	...
	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
	usbsts = readl(&xhci->op_regs->status);
	xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));

	/* Bail out and tear down xhci if a stop endpoint command failed */
	if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
		xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");

		ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
				      TRB_TO_EP_INDEX(cmd_field3));
		if (ep)
			ep->ep_state &= ~EP_STOP_CMD_PENDING;

		xhci_halt(xhci);
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	...
	if (slot_id >= MAX_HC_SLOTS) {
		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
		return;
	}

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			cmd_trb);
	...
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
	...
	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}
	...
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			...
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
		break;
	...
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		if (!cmd->completion)
			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
						cmd_comp_code);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						     struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}
	...
	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event, u32 trb_type)
{
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	...
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for unused slot %u\n",
			  slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
		 slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

/*
 * Quirk handler for errata seen on Cavium ThunderX2 processor xHCI
 * host controller.
 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
 * if a connection to a USB 1 device is followed by a connection to
 * a USB 2 device; resolve this by resetting the internal USB 2 PHY.
 */
static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	...
	do {
		/* Assert PHY reset */
		writel(0x6F, hcd->regs + 0x1048);
		...
		/* De-assert the PHY reset */
		writel(0x7F, hcd->regs + 0x1048);
		...
		pll_lock_check = readl(hcd->regs + 0x1070);
	} while (!(pll_lock_check & 0x1) && --retry_count);
}

static void handle_port_status(struct xhci_hcd *xhci,
			       struct xhci_interrupter *ir,
			       union xhci_trb *event)
{
	...
	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
		xhci_warn(xhci,
			  "WARN: xHC returned failed port status event\n");

	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);

	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Port change event with invalid port ID %d\n",
			  port_id);
		inc_deq(xhci, ir->event_ring);
		return;
	}

	port = &xhci->hw_ports[port_id - 1];
	if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Port change event, no port for port ID %u\n",
			  port_id);
		...
	}

	/* We might get interrupts after shared_hcd is removed */
	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
		xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
		...
	}

	hcd = port->rhub->hcd;
	bus_state = &port->rhub->bus_state;
	hcd_portnum = port->hcd_portnum;
	portsc = readl(port->addr);

	xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
		 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
	...
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		...
	}

	if (hcd->speed >= HCD_USB3 &&
	    (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
	}

	if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		cmd_reg = readl(&xhci->op_regs->command);
		if (!(cmd_reg & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			...
		}

		if (DEV_SUPERSPEED_ANY(portsc)) {
			xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
			/* Set a flag to say the port signaled remote wakeup,
			 * so we can tell the difference between the end of
			 * device and host initiated resume.
			 */
			bus_state->port_remote_wakeup |= 1 << hcd_portnum;
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
			xhci_set_link_state(xhci, port, XDEV_U0);
			...
		} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			port->resume_timestamp = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			set_bit(hcd_portnum, &bus_state->resuming_ports);
			/* Do the rest in GetPortStatus after resume time delay.
			 * Avoid polling roothub status before that so that a
			 * usb device auto-resume latency around ~40ms.
			 */
			set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
			mod_timer(&hcd->rh_timer,
				  port->resume_timestamp);
			usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
			...
		}
	}
	...
		xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
		complete(&port->u3exit_done);
		/* We've just brought the device into U0/1/2 through either the
		 * Resume state after a device remote wakeup, or through the
		 * U3Exit state after a host-initiated resume. ...
		 */
		slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
		if (slot_id && xhci->devs[slot_id])
			xhci_ring_device(xhci, slot_id);
		if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
			xhci_test_and_clear_bit(xhci, port, PORT_PLC);
			usb_wakeup_notification(hcd->self.root_hub,
						hcd_portnum + 1);
			...
		}

	/*
	 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
	 * RExit to a disconnect state). ...
	 */
	if (hcd->speed < HCD_USB3 && port->rexit_active) {
		complete(&port->rexit_done);
		port->rexit_active = false;
		...
	}

	if (hcd->speed < HCD_USB3) {
		xhci_test_and_clear_bit(xhci, port, PORT_PLC);
		if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
		    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
			xhci_cavium_reset_phy_quirk(xhci);
	}

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, ir->event_ring);
	...
	/*
	 * xHCI port-status-change events occur when the "or" of all the
	 * status-change bits in the portsc register changes from 0 to 1.
	 * New status changes won't cause an event if any other change
	 * bits are still set; switch to polling to avoid losing them.
	 */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}

struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *start_seg, union xhci_trb *start_trb,
		union xhci_trb *end_trb, dma_addr_t suspect_dma, bool debug)
{
	...
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		...
		if (debug)
			xhci_warn(xhci,
				"Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
				...
				(unsigned long long)cur_seg->dma,
				...
			if ((suspect_dma >= cur_seg->dma &&
			     ...
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
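
At its core trb_in_td() is an interval test on bus addresses: each segment is one contiguous DMA range, so within a segment the question is whether the event's DMA address lies between the TD's first TRB and the end of its last 16-byte TRB; the segment walk and the wrapped-TD case above handle TDs that cross a link TRB. The single-segment check in isolation (editor's sketch; dma_in_range() is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Is suspect within [first TRB, last byte of the last 16-byte TRB]? */
static bool dma_in_range(uint64_t start, uint64_t end_trb, uint64_t suspect)
{
	return suspect >= start && suspect <= end_trb + 16 - 1;
}

int main(void)
{
	uint64_t td_start = 0x1000, td_last_trb = 0x1040;

	printf("%d\n", dma_in_range(td_start, td_last_trb, 0x1030)); /* 1 */
	printf("%d\n", dma_in_range(td_start, td_last_trb, 0x2000)); /* 0 */
	return 0;
}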

static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
		struct xhci_virt_ep *ep)
{
	/*
	 * As part of low/full-speed endpoint-halt processing
	 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
	 */
	if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
	    (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
	    !(ep->ep_state & EP_CLEARING_TT)) {
		ep->ep_state |= EP_CLEARING_TT;
		td->urb->ep->hcpriv = td->urb->dev;
		if (usb_hub_clear_tt_buffer(td->urb))
			ep->ep_state &= ~EP_CLEARING_TT;
	}
}

/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 ...
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx, unsigned int trb_comp_code)
...

int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	...
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
			 trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");

static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
		     struct xhci_ring *ep_ring, struct xhci_td *td,
		     u32 trb_comp_code)
{
	...
	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
	...
		if ((ep->ep_state & EP_HALTED) &&
		    !list_empty(&td->cancelled_td_list)) {
			xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
				 (unsigned long long)xhci_trb_virt_to_dma(
					 td->start_seg, td->first_trb));
			return 0;
		}
		...
		xhci_clear_hub_tt_buffer(xhci, td, ep);
		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
		...
		/*
		 * xhci internal endpoint state will go to a "halt" state for
		 ...
		 */
		if (ep->ep_index != 0)
			xhci_clear_hub_tt_buffer(xhci, td, ep);

		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
	...
	/* Update ring dequeue pointer */
	ep_ring->dequeue = td->last_trb;
	ep_ring->deq_seg = td->last_trb_seg;
	inc_deq(xhci, ep_ring);

	return xhci_td_cleanup(xhci, td, ep_ring, td->status);

static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
			   union xhci_trb *stop_trb)
{
	...
	union xhci_trb *trb = ring->dequeue;
	struct xhci_segment *seg = ring->deq_seg;

	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
		...
			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2374 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_ctrl_td() argument
2383 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); in process_ctrl_td()
2384 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); in process_ctrl_td()
2385 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_ctrl_td()
2386 requested = td->urb->transfer_buffer_length; in process_ctrl_td()
2387 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_ctrl_td()
2392 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", in process_ctrl_td()
2394 td->status = -ESHUTDOWN; in process_ctrl_td()
2397 td->status = 0; in process_ctrl_td()
2400 td->status = 0; in process_ctrl_td()
2404 td->urb->actual_length = remaining; in process_ctrl_td()
2406 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); in process_ctrl_td()
2411 td->urb->actual_length = 0; in process_ctrl_td()
2415 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2418 td->urb->actual_length = requested; in process_ctrl_td()
2421 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n", in process_ctrl_td()
2428 if (!xhci_requires_manual_halt_cleanup(xhci, in process_ctrl_td()
2431 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n", in process_ctrl_td()
2432 trb_comp_code, ep->ep_index); in process_ctrl_td()
2437 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2438 else if (!td->urb_length_set) in process_ctrl_td()
2439 td->urb->actual_length = 0; in process_ctrl_td()
2453 td->urb_length_set = true; in process_ctrl_td()
2454 td->urb->actual_length = requested - remaining; in process_ctrl_td()
2455 xhci_dbg(xhci, "Waiting for status stage event\n"); in process_ctrl_td()
2460 if (!td->urb_length_set) in process_ctrl_td()
2461 td->urb->actual_length = requested; in process_ctrl_td()
2464 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_ctrl_td()
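/*
 * Summary of the length accounting above: a short data stage records
 * requested - remaining and sets td->urb_length_set so a later status stage
 * event does not overwrite it; if the status stage completes without
 * urb_length_set, the full requested length is assumed transferred.
 */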
2470 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_isoc_td() argument
2482 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_isoc_td()
2483 urb_priv = td->urb->hcpriv; in process_isoc_td()
2484 idx = urb_priv->num_tds_done; in process_isoc_td()
2485 frame = &td->urb->iso_frame_desc[idx]; in process_isoc_td()
2486 requested = frame->length; in process_isoc_td()
2487 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_isoc_td()
2488 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); in process_isoc_td()
2489 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? in process_isoc_td()
2490 -EREMOTEIO : 0; in process_isoc_td()
2495 /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */ in process_isoc_td()
2496 if (td->error_mid_td) in process_isoc_td()
2499 frame->status = short_framestatus; in process_isoc_td()
2503 frame->status = 0; in process_isoc_td()
2506 frame->status = short_framestatus; in process_isoc_td()
2510 frame->status = -ECOMM; in process_isoc_td()
2516 frame->status = -EOVERFLOW; in process_isoc_td()
2517 if (ep_trb != td->last_trb) in process_isoc_td()
2518 td->error_mid_td = true; in process_isoc_td()
2522 frame->status = -EPROTO; in process_isoc_td()
2525 frame->status = -EPROTO; in process_isoc_td()
2527 if (ep_trb != td->last_trb) in process_isoc_td()
2528 td->error_mid_td = true; in process_isoc_td()
2535 frame->status = short_framestatus; in process_isoc_td()
2544 frame->status = -1; in process_isoc_td()
2548 if (td->urb_length_set) in process_isoc_td()
2552 frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + in process_isoc_td()
2553 ep_trb_len - remaining; in process_isoc_td()
2555 frame->actual_length = requested; in process_isoc_td()
2557 td->urb->actual_length += frame->actual_length; in process_isoc_td()
2561 if (td->error_mid_td && ep_trb != td->last_trb) { in process_isoc_td()
2562 xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n"); in process_isoc_td()
2563 td->urb_length_set = true; in process_isoc_td()
2567 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_isoc_td()
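/*
 * Note on error_mid_td above: per xHCI 4.9.1 an error on a TRB in the middle
 * of a multi-TRB isoc TD can be followed by another event for the same TD,
 * so the frame status is latched on the first error and the TD is kept
 * (urb_length_set marks the partial length) until the final completion
 * event for that TD arrives.
 */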
2570 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, in skip_isoc_td() argument
2577 urb_priv = td->urb->hcpriv; in skip_isoc_td()
2578 idx = urb_priv->num_tds_done; in skip_isoc_td()
2579 frame = &td->urb->iso_frame_desc[idx]; in skip_isoc_td()
2582 frame->status = -EXDEV; in skip_isoc_td()
2585 frame->actual_length = 0; in skip_isoc_td()
2588 ep->ring->dequeue = td->last_trb; in skip_isoc_td()
2589 ep->ring->deq_seg = td->last_trb_seg; in skip_isoc_td()
2590 inc_deq(xhci, ep->ring); in skip_isoc_td()
2592 return xhci_td_cleanup(xhci, td, ep->ring, status); in skip_isoc_td()
2598 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, in process_bulk_intr_td() argument
2606 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); in process_bulk_intr_td()
2607 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in process_bulk_intr_td()
2608 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); in process_bulk_intr_td()
2609 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); in process_bulk_intr_td()
2610 requested = td->urb->transfer_buffer_length; in process_bulk_intr_td()
2614 ep->err_count = 0; in process_bulk_intr_td()
2616 if (ep_trb != td->last_trb || remaining) { in process_bulk_intr_td()
2617 xhci_warn(xhci, "WARN Successful completion on short TX\n"); in process_bulk_intr_td()
2618 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2619 td->urb->ep->desc.bEndpointAddress, in process_bulk_intr_td()
2622 td->status = 0; in process_bulk_intr_td()
2625 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n", in process_bulk_intr_td()
2626 td->urb->ep->desc.bEndpointAddress, in process_bulk_intr_td()
2628 td->status = 0; in process_bulk_intr_td()
2631 td->urb->actual_length = remaining; in process_bulk_intr_td()
2635 td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb); in process_bulk_intr_td()
2638 if (xhci->quirks & XHCI_NO_SOFT_RETRY || in process_bulk_intr_td()
2639 (ep->err_count++ > MAX_SOFT_RETRY) || in process_bulk_intr_td()
2640 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) in process_bulk_intr_td()
2643 td->status = 0; in process_bulk_intr_td()
2645 xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET); in process_bulk_intr_td()
2652 if (ep_trb == td->last_trb) in process_bulk_intr_td()
2653 td->urb->actual_length = requested - remaining; in process_bulk_intr_td()
2655 td->urb->actual_length = in process_bulk_intr_td()
2656 sum_trb_lengths(xhci, ep_ring, ep_trb) + in process_bulk_intr_td()
2657 ep_trb_len - remaining; in process_bulk_intr_td()
2660 xhci_warn(xhci, "bad transfer trb length %d in event trb\n", in process_bulk_intr_td()
2662 td->urb->actual_length = 0; in process_bulk_intr_td()
2665 return finish_td(xhci, ep, ep_ring, td, trb_comp_code); in process_bulk_intr_td()
2673 static int handle_tx_event(struct xhci_hcd *xhci, in handle_tx_event() argument
2685 int status = -EINPROGRESS; in handle_tx_event()
2691 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); in handle_tx_event()
2692 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; in handle_tx_event()
2693 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); in handle_tx_event()
2694 ep_trb_dma = le64_to_cpu(event->buffer); in handle_tx_event()
2696 ep = xhci_get_virt_ep(xhci, slot_id, ep_index); in handle_tx_event()
2698 xhci_err(xhci, "ERROR Invalid Transfer event\n"); in handle_tx_event()
2703 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); in handle_tx_event()
2706 xhci_err(xhci, in handle_tx_event()
2712 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ in handle_tx_event()
2719 xhci_dbg(xhci, "Stream transaction error ep %u no id\n", in handle_tx_event()
2721 if (ep->err_count++ > MAX_SOFT_RETRY) in handle_tx_event()
2722 xhci_handle_halted_endpoint(xhci, ep, NULL, in handle_tx_event()
2725 xhci_handle_halted_endpoint(xhci, ep, NULL, in handle_tx_event()
2733 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", in handle_tx_event()
2739 /* Count current td numbers if ep->skip is set */ in handle_tx_event()
2740 if (ep->skip) in handle_tx_event()
2741 td_num += list_count_nodes(&ep_ring->td_list); in handle_tx_event()
2749 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { in handle_tx_event()
2751 xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n", in handle_tx_event()
2752 slot_id, ep_index, ep_ring->last_td_was_short); in handle_tx_event()
2759 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n", in handle_tx_event()
2763 xhci_dbg(xhci, in handle_tx_event()
2764 "Stopped on No-op or Link TRB for slot %u ep %u\n", in handle_tx_event()
2768 xhci_dbg(xhci, in handle_tx_event()
2774 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id, in handle_tx_event()
2776 status = -EPIPE; in handle_tx_event()
2779 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n", in handle_tx_event()
2781 status = -EPROTO; in handle_tx_event()
2784 		xhci_dbg(xhci, "Transfer error for slot %u ep %u\n", in handle_tx_event()
2786 status = -EPROTO; in handle_tx_event()
2789 		xhci_dbg(xhci, "Babble error for slot %u ep %u\n", in handle_tx_event()
2791 status = -EOVERFLOW; in handle_tx_event()
2795 xhci_warn(xhci, in handle_tx_event()
2798 status = -EILSEQ; in handle_tx_event()
2802 xhci_warn(xhci, in handle_tx_event()
2805 status = -ENOSR; in handle_tx_event()
2808 xhci_warn(xhci, in handle_tx_event()
2813 xhci_warn(xhci, in handle_tx_event()
2823 xhci_dbg(xhci, "underrun event on endpoint\n"); in handle_tx_event()
2824 if (!list_empty(&ep_ring->td_list)) in handle_tx_event()
2825 xhci_dbg(xhci, "Underrun Event for slot %d ep %d " in handle_tx_event()
2827 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), in handle_tx_event()
2831 xhci_dbg(xhci, "overrun event on endpoint\n"); in handle_tx_event()
2832 if (!list_empty(&ep_ring->td_list)) in handle_tx_event()
2833 xhci_dbg(xhci, "Overrun Event for slot %d ep %d " in handle_tx_event()
2835 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), in handle_tx_event()
2845 ep->skip = true; in handle_tx_event()
2846 xhci_dbg(xhci, in handle_tx_event()
2851 ep->skip = true; in handle_tx_event()
2852 xhci_dbg(xhci, in handle_tx_event()
2859 xhci_warn(xhci, in handle_tx_event()
2862 status = -EPROTO; in handle_tx_event()
2865 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { in handle_tx_event()
2869 xhci_warn(xhci, in handle_tx_event()
2879 if (list_empty(&ep_ring->td_list)) { in handle_tx_event()
2890 ep_ring->last_td_was_short)) { in handle_tx_event()
2891 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", in handle_tx_event()
2892 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), in handle_tx_event()
2895 if (ep->skip) { in handle_tx_event()
2896 ep->skip = false; in handle_tx_event()
2897 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2901 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in handle_tx_event()
2903 xhci_handle_halted_endpoint(xhci, ep, NULL, in handle_tx_event()
2909 /* We've skipped all the TDs on the ep ring when ep->skip set */ in handle_tx_event()
2910 if (ep->skip && td_num == 0) { in handle_tx_event()
2911 ep->skip = false; in handle_tx_event()
2912 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n", in handle_tx_event()
2917 td = list_first_entry(&ep_ring->td_list, struct xhci_td, in handle_tx_event()
2919 if (ep->skip) in handle_tx_event()
2920 td_num--; in handle_tx_event()
2923 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue, in handle_tx_event()
2924 td->last_trb, ep_trb_dma, false); in handle_tx_event()
2928 * is not in the current TD pointed by ep_ring->dequeue because in handle_tx_event()
2941 if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { in handle_tx_event()
2942 skip_isoc_td(xhci, td, ep, status); in handle_tx_event()
2950 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && in handle_tx_event()
2951 ep_ring->last_td_was_short) { in handle_tx_event()
2952 ep_ring->last_td_was_short = false; in handle_tx_event()
2957 * xhci 4.10.2 states isoc endpoints should continue in handle_tx_event()
2961 		 * xhci 4.9.1 states that if there are errors in multi-TRB in handle_tx_event()
2967 if (td->error_mid_td && in handle_tx_event()
2968 !list_is_last(&td->td_list, &ep_ring->td_list)) { in handle_tx_event()
2971 ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb, in handle_tx_event()
2972 td_next->last_trb, ep_trb_dma, false); in handle_tx_event()
2975 xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); in handle_tx_event()
2976 ep_ring->dequeue = td->last_trb; in handle_tx_event()
2977 ep_ring->deq_seg = td->last_trb_seg; in handle_tx_event()
2978 inc_deq(xhci, ep_ring); in handle_tx_event()
2979 xhci_td_cleanup(xhci, td, ep_ring, td->status); in handle_tx_event()
2986 xhci_err(xhci, in handle_tx_event()
2991 trb_in_td(xhci, ep_ring->deq_seg, in handle_tx_event()
2992 ep_ring->dequeue, td->last_trb, in handle_tx_event()
2994 return -ESHUTDOWN; in handle_tx_event()
2998 ep_ring->last_td_was_short = true; in handle_tx_event()
3000 ep_ring->last_td_was_short = false; in handle_tx_event()
3002 if (ep->skip) { in handle_tx_event()
3003 xhci_dbg(xhci, in handle_tx_event()
3006 ep->skip = false; in handle_tx_event()
3009 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / in handle_tx_event()
3016 * No-op TRB could trigger interrupts in a case where in handle_tx_event()
3025 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, in handle_tx_event()
3027 xhci_handle_halted_endpoint(xhci, ep, td, in handle_tx_event()
3032 td->status = status; in handle_tx_event()
3035 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) in handle_tx_event()
3036 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
3037 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc)) in handle_tx_event()
3038 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
3040 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event); in handle_tx_event()
3042 handling_skipped_tds = ep->skip && in handle_tx_event()
3051 inc_deq(xhci, ir->event_ring); in handle_tx_event()
3054 * If ep->skip is set, it means there are missed tds on the in handle_tx_event()
3064 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", in handle_tx_event()
3066 ir->event_ring->deq_seg, in handle_tx_event()
3067 ir->event_ring->dequeue), in handle_tx_event()
3068 lower_32_bits(le64_to_cpu(event->buffer)), in handle_tx_event()
3069 upper_32_bits(le64_to_cpu(event->buffer)), in handle_tx_event()
3070 le32_to_cpu(event->transfer_len), in handle_tx_event()
3071 le32_to_cpu(event->flags)); in handle_tx_event()
3072 return -ENODEV; in handle_tx_event()
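/*
 * Overall shape of handle_tx_event() above: translate the completion code
 * into a provisional status, look up the endpoint ring, then use trb_in_td()
 * to find the TD that owns the event's DMA address - skipping missed isoc
 * TDs while ep->skip is set - and finally dispatch to process_ctrl_td(),
 * process_isoc_td() or process_bulk_intr_td(), which all funnel into
 * finish_td().
 */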
3076 * This function handles all OS-owned events on the event ring. It may drop
3077 * xhci->lock between event processing (e.g. to pass up port status changes).
3081 static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) in xhci_handle_event() argument
3089 if (!ir || !ir->event_ring || !ir->event_ring->dequeue) { in xhci_handle_event()
3090 xhci_err(xhci, "ERROR interrupter not ready\n"); in xhci_handle_event()
3091 return -ENOMEM; in xhci_handle_event()
3094 event = ir->event_ring->dequeue; in xhci_handle_event()
3096 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != in xhci_handle_event()
3097 ir->event_ring->cycle_state) in xhci_handle_event()
3100 trace_xhci_handle_event(ir->event_ring, &event->generic); in xhci_handle_event()
3107 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); in xhci_handle_event()
3112 handle_cmd_completion(xhci, &event->event_cmd); in xhci_handle_event()
3115 handle_port_status(xhci, ir, event); in xhci_handle_event()
3119 ret = handle_tx_event(xhci, ir, &event->trans_event); in xhci_handle_event()
3124 handle_device_notification(xhci, event); in xhci_handle_event()
3128 handle_vendor_event(xhci, event, trb_type); in xhci_handle_event()
3130 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); in xhci_handle_event()
3132 /* Any of the above functions may drop and re-acquire the lock, so check in xhci_handle_event()
3133 * to make sure a watchdog timer didn't mark the host as non-responsive. in xhci_handle_event()
3135 if (xhci->xhc_state & XHCI_STATE_DYING) { in xhci_handle_event()
3136 xhci_dbg(xhci, "xHCI host dying, returning from " in xhci_handle_event()
3143 inc_deq(xhci, ir->event_ring); in xhci_handle_event()
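/*
 * The caller (xhci_irq() below) keeps calling xhci_handle_event() while it
 * returns a positive value, i.e. while more events are ready on the ring.
 */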
3153 * - When all events have finished
3154 * - To avoid "Event Ring Full Error" condition
3156 static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, in xhci_update_erst_dequeue() argument
3164 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
3166 if (event_ring_deq != ir->event_ring->dequeue) { in xhci_update_erst_dequeue()
3167 deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, in xhci_update_erst_dequeue()
3168 ir->event_ring->dequeue); in xhci_update_erst_dequeue()
3170 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); in xhci_update_erst_dequeue()
3180 temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; in xhci_update_erst_dequeue()
3187 xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue); in xhci_update_erst_dequeue()
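/*
 * The write above hands the new dequeue pointer (with the segment index in
 * the ERST_DESI bits) back to the interrupter; in the full function the
 * Event Handler Busy flag (ERST_EHB, RW1C) is also set in this doubleword
 * so the controller may assert further interrupts.
 */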
3191 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3197 struct xhci_hcd *xhci = hcd_to_xhci(hcd); in xhci_irq() local
3205 spin_lock(&xhci->lock); in xhci_irq()
3207 status = readl(&xhci->op_regs->status); in xhci_irq()
3209 xhci_hc_died(xhci); in xhci_irq()
3218 xhci_warn(xhci, "WARNING: Host Controller Error\n"); in xhci_irq()
3223 xhci_warn(xhci, "WARNING: Host System Error\n"); in xhci_irq()
3224 xhci_halt(xhci); in xhci_irq()
3231 * so we can receive interrupts from other MSI-X interrupters. in xhci_irq()
3235 writel(status, &xhci->op_regs->status); in xhci_irq()
3238 ir = xhci->interrupters[0]; in xhci_irq()
3239 if (!hcd->msi_enabled) { in xhci_irq()
3241 irq_pending = readl(&ir->ir_set->irq_pending); in xhci_irq()
3243 writel(irq_pending, &ir->ir_set->irq_pending); in xhci_irq()
3246 if (xhci->xhc_state & XHCI_STATE_DYING || in xhci_irq()
3247 xhci->xhc_state & XHCI_STATE_HALTED) { in xhci_irq()
3248 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " in xhci_irq()
3253 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); in xhci_irq()
3254 xhci_write_64(xhci, temp_64 | ERST_EHB, in xhci_irq()
3255 &ir->ir_set->erst_dequeue); in xhci_irq()
3260 event_ring_deq = ir->event_ring->dequeue; in xhci_irq()
3264 while (xhci_handle_event(xhci, ir) > 0) { in xhci_irq()
3267 xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false); in xhci_irq()
3268 event_ring_deq = ir->event_ring->dequeue; in xhci_irq()
3270 /* ring is half-full, force isoc trbs to interrupt more often */ in xhci_irq()
3271 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN) in xhci_irq()
3272 xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2; in xhci_irq()
3277 xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true); in xhci_irq()
3281 spin_unlock(&xhci->lock); in xhci_irq()
3295 * Generic function for queueing a TRB on a ring.
3301 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, in queue_trb() argument
3307 trb = &ring->enqueue->generic; in queue_trb()
3308 trb->field[0] = cpu_to_le32(field1); in queue_trb()
3309 trb->field[1] = cpu_to_le32(field2); in queue_trb()
3310 trb->field[2] = cpu_to_le32(field3); in queue_trb()
3313 trb->field[3] = cpu_to_le32(field4); in queue_trb()
3317 inc_enq(xhci, ring, more_trbs_coming); in queue_trb()
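/*
 * Ordering note for queue_trb(): field[3] carries the cycle bit that
 * transfers ownership of the TRB to the controller, so it must be the last
 * word written; in the full source a wmb() sits in the elided lines between
 * the field[2] and field[3] stores to keep them from being reordered.
 */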
3324 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, in prepare_ring() argument
3337 xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); in prepare_ring()
3338 return -ENOENT; in prepare_ring()
3340 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n"); in prepare_ring()
3342 /* XXX not sure if this should be -ENOENT or not */ in prepare_ring()
3343 return -EINVAL; in prepare_ring()
3345 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n"); in prepare_ring()
3351 xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); in prepare_ring()
3356 return -EINVAL; in prepare_ring()
3359 if (ep_ring != xhci->cmd_ring) { in prepare_ring()
3360 new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs); in prepare_ring()
3361 } else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) { in prepare_ring()
3362 		xhci_err(xhci, "Command ring expansion is not supported\n"); in prepare_ring()
3363 return -ENOMEM; in prepare_ring()
3367 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion, in prepare_ring()
3369 if (xhci_ring_expansion(xhci, ep_ring, new_segs, mem_flags)) { in prepare_ring()
3370 xhci_err(xhci, "Ring expansion failed\n"); in prepare_ring()
3371 return -ENOMEM; in prepare_ring()
3375 while (trb_is_link(ep_ring->enqueue)) { in prepare_ring()
3379 if (!xhci_link_trb_quirk(xhci) && in prepare_ring()
3380 !(ep_ring->type == TYPE_ISOC && in prepare_ring()
3381 (xhci->quirks & XHCI_AMD_0x96_HOST))) in prepare_ring()
3382 ep_ring->enqueue->link.control &= in prepare_ring()
3385 ep_ring->enqueue->link.control |= in prepare_ring()
3389 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); in prepare_ring()
3392 if (link_trb_toggles_cycle(ep_ring->enqueue)) in prepare_ring()
3393 ep_ring->cycle_state ^= 1; in prepare_ring()
3395 ep_ring->enq_seg = ep_ring->enq_seg->next; in prepare_ring()
3396 ep_ring->enqueue = ep_ring->enq_seg->trbs; in prepare_ring()
3399 if (link_trb_count++ > ep_ring->num_segs) { in prepare_ring()
3400 xhci_warn(xhci, "Ring is an endless link TRB loop\n"); in prepare_ring()
3401 return -EINVAL; in prepare_ring()
3405 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) { in prepare_ring()
3406 xhci_warn(xhci, "Missing link TRB at end of ring segment\n"); in prepare_ring()
3407 return -EINVAL; in prepare_ring()
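/*
 * prepare_ring() thus does three jobs before anything is queued: reject
 * rings whose endpoint state cannot accept TRBs, grow the ring if the TD
 * will not fit (the command ring cannot be grown), and walk the enqueue
 * pointer across any link TRBs, fixing up their chain and cycle bits on
 * the way.
 */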
3413 static int prepare_transfer(struct xhci_hcd *xhci, in prepare_transfer() argument
3426 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in prepare_transfer()
3428 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, in prepare_transfer()
3431 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", in prepare_transfer()
3433 return -EINVAL; in prepare_transfer()
3436 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), in prepare_transfer()
3441 urb_priv = urb->hcpriv; in prepare_transfer()
3442 td = &urb_priv->td[td_index]; in prepare_transfer()
3444 INIT_LIST_HEAD(&td->td_list); in prepare_transfer()
3445 INIT_LIST_HEAD(&td->cancelled_td_list); in prepare_transfer()
3448 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb); in prepare_transfer()
3453 td->urb = urb; in prepare_transfer()
3455 list_add_tail(&td->td_list, &ep_ring->td_list); in prepare_transfer()
3456 td->start_seg = ep_ring->enq_seg; in prepare_transfer()
3457 td->first_trb = ep_ring->enqueue; in prepare_transfer()
3466 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)), in count_trbs()
3476 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length); in count_trbs_needed()
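/*
 * count_trbs() above sizes a buffer in 64 KiB TRB windows: a normal TRB may
 * not cross a 64 KiB boundary, so the unaligned head
 * (addr & (TRB_MAX_BUFF_SIZE - 1)) is folded into the length before the
 * round-up division. A stand-alone sketch of the same arithmetic
 * (TRB_MAX_BUFF_SIZE is 64 KiB in xhci.h; names here are illustrative):
 */
#include <stdint.h>

#define SKETCH_TRB_MAX_BUFF	(1u << 16)	/* 64 KiB per normal TRB */

static unsigned int sketch_count_trbs(uint64_t addr, uint64_t len)
{
	uint64_t span = len + (addr & (SKETCH_TRB_MAX_BUFF - 1));
	unsigned int num = (span + SKETCH_TRB_MAX_BUFF - 1) / SKETCH_TRB_MAX_BUFF;

	return num ? num : 1;	/* a zero-length transfer still takes one TRB */
}
/* e.g. 64 KiB starting 16 bytes past a boundary spans two windows: 2 TRBs */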
3484 full_len = urb->transfer_buffer_length; in count_sg_trbs_needed()
3486 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { in count_sg_trbs_needed()
3490 full_len -= len; in count_sg_trbs_needed()
3502 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); in count_isoc_trbs_needed()
3503 len = urb->iso_frame_desc[i].length; in count_isoc_trbs_needed()
3510 if (unlikely(running_total != urb->transfer_buffer_length)) in check_trb_math()
3511 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " in check_trb_math()
3514 urb->ep->desc.bEndpointAddress, in check_trb_math()
3516 urb->transfer_buffer_length, in check_trb_math()
3517 urb->transfer_buffer_length); in check_trb_math()
3520 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, in giveback_first_trb() argument
3530 start_trb->field[3] |= cpu_to_le32(start_cycle); in giveback_first_trb()
3532 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); in giveback_first_trb()
3533 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); in giveback_first_trb()
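/*
 * giveback_first_trb() publishes a fully built TD: the first TRB was
 * deliberately written with an inverted cycle bit so the controller would
 * ignore it while the rest of the TD was queued; restoring the correct
 * cycle bit here hands the whole TD to the hardware, and the doorbell ring
 * tells it to go look.
 */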
3536 static void check_interval(struct xhci_hcd *xhci, struct urb *urb, in check_interval() argument
3542 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); in check_interval()
3543 ep_interval = urb->interval; in check_interval()
3546 if (urb->dev->speed == USB_SPEED_LOW || in check_interval()
3547 urb->dev->speed == USB_SPEED_FULL) in check_interval()
3554 dev_dbg_ratelimited(&urb->dev->dev, in check_interval()
3555 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n", in check_interval()
3558 urb->interval = xhci_interval; in check_interval()
3560 if (urb->dev->speed == USB_SPEED_LOW || in check_interval()
3561 urb->dev->speed == USB_SPEED_FULL) in check_interval()
3562 urb->interval /= 8; in check_interval()
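/*
 * Worked example for check_interval(): ep_info stores the interval in
 * microframes, so a full-speed interrupt endpoint programmed with a
 * 32-microframe interval reports xhci_interval = 32, while the URB of a
 * LS/FS device carries frames; the driver scales the URB value by 8 before
 * comparing, and on mismatch adopts the xHC value (converting back to
 * frames for LS/FS, as above).
 */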
3567 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3572 int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_intr_tx() argument
3577 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index); in xhci_queue_intr_tx()
3578 check_interval(xhci, urb, ep_ctx); in xhci_queue_intr_tx()
3580 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_intr_tx()
3584 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3593 * TD size = total_packet_count - packets_transferred
3595 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3603 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, in xhci_td_remainder() argument
3609 /* MTK xHCI 0.96 contains some features from 1.0 */ in xhci_td_remainder()
3610 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) in xhci_td_remainder()
3611 return ((td_total_len - transferred) >> 10); in xhci_td_remainder()
3613 /* One TRB with a zero-length data packet. */ in xhci_td_remainder()
3618 	/* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */ in xhci_td_remainder()
3619 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) in xhci_td_remainder()
3622 maxp = usb_endpoint_maxp(&urb->ep->desc); in xhci_td_remainder()
3626 return (total_packet_count - ((transferred + trb_buff_len) / maxp)); in xhci_td_remainder()
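/*
 * A stand-alone sketch (hypothetical names) of the xHCI 1.0+ TD-size math
 * above; in the driver the result is additionally clamped to 31 by
 * TRB_TD_SIZE() when the TRB word is built:
 */
static unsigned int sketch_td_size(unsigned int transferred,
				   unsigned int trb_buff_len,
				   unsigned int td_total_len,
				   unsigned int maxp)
{
	unsigned int total_packets = (td_total_len + maxp - 1) / maxp;

	/* max packet sized packets still owed after this TRB completes */
	return total_packets - (transferred + trb_buff_len) / maxp;
}
/*
 * e.g. 20480-byte TD, maxp 512, 3072 already sent, 1024 in this TRB:
 * 40 total packets - 8 done = TD size 32 (clamped to 31 in the field).
 */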
3630 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, in xhci_align_td() argument
3633 struct device *dev = xhci_to_hcd(xhci)->self.sysdev; in xhci_align_td()
3639 max_pkt = usb_endpoint_maxp(&urb->ep->desc); in xhci_align_td()
3646 xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n", in xhci_align_td()
3651 *trb_buff_len -= unalign; in xhci_align_td()
3652 xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len); in xhci_align_td()
3661 new_buff_len = max_pkt - (enqd_len % max_pkt); in xhci_align_td()
3663 if (new_buff_len > (urb->transfer_buffer_length - enqd_len)) in xhci_align_td()
3664 new_buff_len = (urb->transfer_buffer_length - enqd_len); in xhci_align_td()
3668 if (urb->num_sgs) { in xhci_align_td()
3669 len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, in xhci_align_td()
3670 seg->bounce_buf, new_buff_len, enqd_len); in xhci_align_td()
3672 xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", in xhci_align_td()
3675 memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); in xhci_align_td()
3678 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, in xhci_align_td()
3681 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, in xhci_align_td()
3685 if (dma_mapping_error(dev, seg->bounce_dma)) { in xhci_align_td()
3687 xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); in xhci_align_td()
3691 seg->bounce_len = new_buff_len; in xhci_align_td()
3692 seg->bounce_offs = enqd_len; in xhci_align_td()
3694 xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len); in xhci_align_td()
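/*
 * Background for xhci_align_td(): xHCI 1.0+ requires every TD fragment
 * except the last to end on a max-packet boundary. When the data spanning a
 * segment's link TRB would violate that, the tail is either trimmed from
 * this TRB (split align) or copied into the per-segment bounce buffer and
 * DMA-mapped so a single aligned TRB can cover it; bounce_offs/bounce_len
 * let the completion path copy IN data back to the URB.
 */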
3699 /* This is very similar to what ehci-q.c qtd_fill() does */
3700 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_bulk_tx() argument
3718 ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_bulk_tx()
3720 return -EINVAL; in xhci_queue_bulk_tx()
3722 full_len = urb->transfer_buffer_length; in xhci_queue_bulk_tx()
3724 if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) { in xhci_queue_bulk_tx()
3725 num_sgs = urb->num_mapped_sgs; in xhci_queue_bulk_tx()
3726 sg = urb->sg; in xhci_queue_bulk_tx()
3732 addr = (u64) urb->transfer_dma; in xhci_queue_bulk_tx()
3735 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3736 ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3741 urb_priv = urb->hcpriv; in xhci_queue_bulk_tx()
3743 /* Deal with URB_ZERO_PACKET - need one more td/trb */ in xhci_queue_bulk_tx()
3744 if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1) in xhci_queue_bulk_tx()
3747 td = &urb_priv->td[0]; in xhci_queue_bulk_tx()
3754 start_trb = &ring->enqueue->generic; in xhci_queue_bulk_tx()
3755 start_cycle = ring->cycle_state; in xhci_queue_bulk_tx()
3758 /* Queue the TRBs, even if they are zero-length */ in xhci_queue_bulk_tx()
3768 trb_buff_len = full_len - enqd_len; in xhci_queue_bulk_tx()
3776 field |= ring->cycle_state; in xhci_queue_bulk_tx()
3783 if (trb_is_link(ring->enqueue + 1)) { in xhci_queue_bulk_tx()
3784 if (xhci_align_td(xhci, urb, enqd_len, in xhci_queue_bulk_tx()
3786 ring->enq_seg)) { in xhci_queue_bulk_tx()
3787 send_addr = ring->enq_seg->bounce_dma; in xhci_queue_bulk_tx()
3789 td->bounce_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3797 td->last_trb = ring->enqueue; in xhci_queue_bulk_tx()
3798 td->last_trb_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3800 memcpy(&send_addr, urb->transfer_buffer, in xhci_queue_bulk_tx()
3812 remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len, in xhci_queue_bulk_tx()
3819 queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, in xhci_queue_bulk_tx()
3824 td->num_trbs++; in xhci_queue_bulk_tx()
3830 --num_sgs; in xhci_queue_bulk_tx()
3831 sent_len -= block_len; in xhci_queue_bulk_tx()
3839 block_len -= sent_len; in xhci_queue_bulk_tx()
3844 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_bulk_tx()
3845 ep_index, urb->stream_id, in xhci_queue_bulk_tx()
3847 urb_priv->td[1].last_trb = ring->enqueue; in xhci_queue_bulk_tx()
3848 urb_priv->td[1].last_trb_seg = ring->enq_seg; in xhci_queue_bulk_tx()
3849 field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC; in xhci_queue_bulk_tx()
3850 queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field); in xhci_queue_bulk_tx()
3851 urb_priv->td[1].num_trbs++; in xhci_queue_bulk_tx()
3855 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_bulk_tx()
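/*
 * The one-TRB TD queued just above (td[1]) implements URB_ZERO_PACKET: when
 * the flag is set and urb_priv was allocated with a second TD, a zero-length
 * normal TRB with TRB_IOC follows the data so the device sees the
 * terminating zero-length packet.
 */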
3860 /* Caller must have locked xhci->lock */
3861 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_ctrl_tx() argument
3874 ep_ring = xhci_urb_to_transfer_ring(xhci, urb); in xhci_queue_ctrl_tx()
3876 return -EINVAL; in xhci_queue_ctrl_tx()
3882 if (!urb->setup_packet) in xhci_queue_ctrl_tx()
3883 return -EINVAL; in xhci_queue_ctrl_tx()
3885 if ((xhci->quirks & XHCI_ETRON_HOST) && in xhci_queue_ctrl_tx()
3886 urb->dev->speed >= USB_SPEED_SUPER) { in xhci_queue_ctrl_tx()
3892 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue + 1)) { in xhci_queue_ctrl_tx()
3893 field = TRB_TYPE(TRB_TR_NOOP) | ep_ring->cycle_state; in xhci_queue_ctrl_tx()
3894 queue_trb(xhci, ep_ring, false, 0, 0, in xhci_queue_ctrl_tx()
3906 if (urb->transfer_buffer_length > 0) in xhci_queue_ctrl_tx()
3908 ret = prepare_transfer(xhci, xhci->devs[slot_id], in xhci_queue_ctrl_tx()
3909 ep_index, urb->stream_id, in xhci_queue_ctrl_tx()
3914 urb_priv = urb->hcpriv; in xhci_queue_ctrl_tx()
3915 td = &urb_priv->td[0]; in xhci_queue_ctrl_tx()
3916 td->num_trbs = num_trbs; in xhci_queue_ctrl_tx()
3923 start_trb = &ep_ring->enqueue->generic; in xhci_queue_ctrl_tx()
3924 start_cycle = ep_ring->cycle_state; in xhci_queue_ctrl_tx()
3926 /* Queue setup TRB - see section 6.4.1.2.1 */ in xhci_queue_ctrl_tx()
3928 setup = (struct usb_ctrlrequest *) urb->setup_packet; in xhci_queue_ctrl_tx()
3934 /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */ in xhci_queue_ctrl_tx()
3935 if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) { in xhci_queue_ctrl_tx()
3936 if (urb->transfer_buffer_length > 0) { in xhci_queue_ctrl_tx()
3937 if (setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3944 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3945 setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16, in xhci_queue_ctrl_tx()
3946 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16, in xhci_queue_ctrl_tx()
3958 if (urb->transfer_buffer_length > 0) { in xhci_queue_ctrl_tx()
3963 memcpy(&addr, urb->transfer_buffer, in xhci_queue_ctrl_tx()
3964 urb->transfer_buffer_length); in xhci_queue_ctrl_tx()
3968 addr = (u64) urb->transfer_dma; in xhci_queue_ctrl_tx()
3971 remainder = xhci_td_remainder(xhci, 0, in xhci_queue_ctrl_tx()
3972 urb->transfer_buffer_length, in xhci_queue_ctrl_tx()
3973 urb->transfer_buffer_length, in xhci_queue_ctrl_tx()
3975 length_field = TRB_LEN(urb->transfer_buffer_length) | in xhci_queue_ctrl_tx()
3978 if (setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3980 queue_trb(xhci, ep_ring, true, in xhci_queue_ctrl_tx()
3984 field | ep_ring->cycle_state); in xhci_queue_ctrl_tx()
3988 td->last_trb = ep_ring->enqueue; in xhci_queue_ctrl_tx()
3989 td->last_trb_seg = ep_ring->enq_seg; in xhci_queue_ctrl_tx()
3991 /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ in xhci_queue_ctrl_tx()
3993 if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) in xhci_queue_ctrl_tx()
3997 queue_trb(xhci, ep_ring, false, in xhci_queue_ctrl_tx()
4002 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); in xhci_queue_ctrl_tx()
4004 giveback_first_trb(xhci, slot_id, ep_index, 0, in xhci_queue_ctrl_tx()
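/*
 * Shape of the control TD queued above: one setup TRB (TRB_SETUP, with the
 * 8-byte usb_ctrlrequest packed into its first two fields), an optional
 * data stage TRB whose direction follows bRequestType, and a status TRB
 * whose direction is always opposite the data stage (or IN when there is
 * no data stage), carrying TRB_IOC to raise the completion interrupt.
 */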
4015 * zero. Only xHCI 1.0 host controllers support this field.
4017 static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci, in xhci_get_burst_count() argument
4022 if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER) in xhci_get_burst_count()
4025 max_burst = urb->ep->ss_ep_comp.bMaxBurst; in xhci_get_burst_count()
4026 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1; in xhci_get_burst_count()
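/*
 * Example for xhci_get_burst_count(): a SuperSpeed endpoint with
 * bMaxBurst = 3 moves up to 4 packets per burst, so a 10-packet TD needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, and the zero-based field is 2.
 */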
4037 static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci, in xhci_get_last_burst_packet_count() argument
4043 if (xhci->hci_version < 0x100) in xhci_get_last_burst_packet_count()
4046 if (urb->dev->speed >= USB_SPEED_SUPER) { in xhci_get_last_burst_packet_count()
4048 max_burst = urb->ep->ss_ep_comp.bMaxBurst; in xhci_get_last_burst_packet_count()
4051 * number of packets, but the TLBPC field is zero-based. in xhci_get_last_burst_packet_count()
4055 return residue - 1; in xhci_get_last_burst_packet_count()
4059 return total_packet_count - 1; in xhci_get_last_burst_packet_count()
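/*
 * Example for xhci_get_last_burst_packet_count(): with bMaxBurst = 3,
 * 10 packets leave residue 10 % 4 = 2, so the last burst holds 2 packets
 * and the zero-based field is 1; a residue of 0 means the last burst was
 * full, giving max_burst (4 packets, zero-based 3).
 */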
4069 static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, in xhci_get_isoc_frame_id() argument
4075 if (urb->dev->speed == USB_SPEED_LOW || in xhci_get_isoc_frame_id()
4076 urb->dev->speed == USB_SPEED_FULL) in xhci_get_isoc_frame_id()
4077 start_frame = urb->start_frame + index * urb->interval; in xhci_get_isoc_frame_id()
4079 start_frame = (urb->start_frame + index * urb->interval) >> 3; in xhci_get_isoc_frame_id()
4089 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_get_isoc_frame_id()
4090 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_get_isoc_frame_id()
4106 current_frame_id = readl(&xhci->run_regs->microframe_index); in xhci_get_isoc_frame_id()
4114 xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n", in xhci_get_isoc_frame_id()
4115 __func__, index, readl(&xhci->run_regs->microframe_index), in xhci_get_isoc_frame_id()
4121 ret = -EINVAL; in xhci_get_isoc_frame_id()
4125 ret = -EINVAL; in xhci_get_isoc_frame_id()
4127 ret = -EINVAL; in xhci_get_isoc_frame_id()
4131 if (ret == -EINVAL || start_frame == start_frame_id) { in xhci_get_isoc_frame_id()
4133 if (urb->dev->speed == USB_SPEED_LOW || in xhci_get_isoc_frame_id()
4134 urb->dev->speed == USB_SPEED_FULL) in xhci_get_isoc_frame_id()
4135 urb->start_frame = start_frame; in xhci_get_isoc_frame_id()
4137 urb->start_frame = start_frame << 3; in xhci_get_isoc_frame_id()
4143 xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n", in xhci_get_isoc_frame_id()
4146 xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n"); in xhci_get_isoc_frame_id()
4154 static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i) in trb_block_event_intr() argument
4156 if (xhci->hci_version < 0x100) in trb_block_event_intr()
4159 if (i == num_tds - 1) in trb_block_event_intr()
4165 if (i && xhci->quirks & XHCI_AVOID_BEI) in trb_block_event_intr()
4166 return !!(i % xhci->isoc_bei_interval); in trb_block_event_intr()
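/*
 * trb_block_event_intr() above decides when an isoc TD may carry TRB_BEI
 * (Block Event Interrupt): never on pre-1.0 hosts or on the URB's last TD,
 * and with the XHCI_AVOID_BEI quirk one TD in every xhci->isoc_bei_interval
 * still interrupts so the event ring gets drained; otherwise mid-URB isoc
 * TDs complete silently. xhci_irq() halves isoc_bei_interval when the event
 * ring runs half-full.
 */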
4172 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx() argument
4190 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx()
4191 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; in xhci_queue_isoc_tx()
4193 num_tds = urb->number_of_packets; in xhci_queue_isoc_tx()
4195 xhci_dbg(xhci, "Isoc URB with zero packets?\n"); in xhci_queue_isoc_tx()
4196 return -EINVAL; in xhci_queue_isoc_tx()
4198 start_addr = (u64) urb->transfer_dma; in xhci_queue_isoc_tx()
4199 start_trb = &ep_ring->enqueue->generic; in xhci_queue_isoc_tx()
4200 start_cycle = ep_ring->cycle_state; in xhci_queue_isoc_tx()
4202 urb_priv = urb->hcpriv; in xhci_queue_isoc_tx()
4203 /* Queue the TRBs for each TD, even if they are zero-length */ in xhci_queue_isoc_tx()
4211 addr = start_addr + urb->iso_frame_desc[i].offset; in xhci_queue_isoc_tx()
4212 td_len = urb->iso_frame_desc[i].length; in xhci_queue_isoc_tx()
4214 max_pkt = usb_endpoint_maxp(&urb->ep->desc); in xhci_queue_isoc_tx()
4217 /* A zero-length transfer still involves at least one packet. */ in xhci_queue_isoc_tx()
4220 burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count); in xhci_queue_isoc_tx()
4221 last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci, in xhci_queue_isoc_tx()
4226 ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, in xhci_queue_isoc_tx()
4227 urb->stream_id, trbs_per_td, urb, i, mem_flags); in xhci_queue_isoc_tx()
4233 td = &urb_priv->td[i]; in xhci_queue_isoc_tx()
4234 td->num_trbs = trbs_per_td; in xhci_queue_isoc_tx()
4237 if (!(urb->transfer_flags & URB_ISO_ASAP) && in xhci_queue_isoc_tx()
4238 HCC_CFC(xhci->hcc_params)) { in xhci_queue_isoc_tx()
4239 frame_id = xhci_get_isoc_frame_id(xhci, urb, i); in xhci_queue_isoc_tx()
4251 (i ? ep_ring->cycle_state : !start_cycle); in xhci_queue_isoc_tx()
4253 /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */ in xhci_queue_isoc_tx()
4254 if (!xep->use_extended_tbc) in xhci_queue_isoc_tx()
4264 ep_ring->cycle_state; in xhci_queue_isoc_tx()
4271 if (j < trbs_per_td - 1) { in xhci_queue_isoc_tx()
4276 td->last_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4277 td->last_trb_seg = ep_ring->enq_seg; in xhci_queue_isoc_tx()
4279 if (trb_block_event_intr(xhci, num_tds, i)) in xhci_queue_isoc_tx()
4288 remainder = xhci_td_remainder(xhci, running_total, in xhci_queue_isoc_tx()
4295 /* xhci 1.1 with ETE uses TD Size field for TBC */ in xhci_queue_isoc_tx()
4296 if (first_trb && xep->use_extended_tbc) in xhci_queue_isoc_tx()
4302 queue_trb(xhci, ep_ring, more_trbs_coming, in xhci_queue_isoc_tx()
4310 td_remain_len -= trb_buff_len; in xhci_queue_isoc_tx()
4315 		xhci_err(xhci, "ISOC TD length mismatch\n"); in xhci_queue_isoc_tx()
4316 ret = -EINVAL; in xhci_queue_isoc_tx()
4322 if (HCC_CFC(xhci->hcc_params)) in xhci_queue_isoc_tx()
4323 xep->next_frame_id = urb->start_frame + num_tds * urb->interval; in xhci_queue_isoc_tx()
4325 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) { in xhci_queue_isoc_tx()
4326 if (xhci->quirks & XHCI_AMD_PLL_FIX) in xhci_queue_isoc_tx()
4329 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++; in xhci_queue_isoc_tx()
4331 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, in xhci_queue_isoc_tx()
4337 for (i--; i >= 0; i--) in xhci_queue_isoc_tx()
4338 list_del_init(&urb_priv->td[i].td_list); in xhci_queue_isoc_tx()
4341 * into No-ops with a software-owned cycle bit. That way the hardware in xhci_queue_isoc_tx()
4343 * overwrite them. td->first_trb and td->start_seg are already set. in xhci_queue_isoc_tx()
4345 urb_priv->td[0].last_trb = ep_ring->enqueue; in xhci_queue_isoc_tx()
4347 td_to_noop(xhci, ep_ring, &urb_priv->td[0], true); in xhci_queue_isoc_tx()
4350 ep_ring->enqueue = urb_priv->td[0].first_trb; in xhci_queue_isoc_tx()
4351 ep_ring->enq_seg = urb_priv->td[0].start_seg; in xhci_queue_isoc_tx()
4352 ep_ring->cycle_state = start_cycle; in xhci_queue_isoc_tx()
4353 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); in xhci_queue_isoc_tx()
4360 * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
4361 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
4364 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, in xhci_queue_isoc_tx_prepare() argument
4376 xdev = xhci->devs[slot_id]; in xhci_queue_isoc_tx_prepare()
4377 xep = &xhci->devs[slot_id]->eps[ep_index]; in xhci_queue_isoc_tx_prepare()
4378 ep_ring = xdev->eps[ep_index].ring; in xhci_queue_isoc_tx_prepare()
4379 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); in xhci_queue_isoc_tx_prepare()
4382 num_tds = urb->number_of_packets; in xhci_queue_isoc_tx_prepare()
4389 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), in xhci_queue_isoc_tx_prepare()
4398 check_interval(xhci, urb, ep_ctx); in xhci_queue_isoc_tx_prepare()
4400 /* Calculate the start frame and put it in urb->start_frame. */ in xhci_queue_isoc_tx_prepare()
4401 if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { in xhci_queue_isoc_tx_prepare()
4403 urb->start_frame = xep->next_frame_id; in xhci_queue_isoc_tx_prepare()
4408 start_frame = readl(&xhci->run_regs->microframe_index); in xhci_queue_isoc_tx_prepare()
4414 ist = HCS_IST(xhci->hcs_params2) & 0x7; in xhci_queue_isoc_tx_prepare()
4415 if (HCS_IST(xhci->hcs_params2) & (1 << 3)) in xhci_queue_isoc_tx_prepare()
4424 if (urb->dev->speed == USB_SPEED_LOW || in xhci_queue_isoc_tx_prepare()
4425 urb->dev->speed == USB_SPEED_FULL) { in xhci_queue_isoc_tx_prepare()
4426 start_frame = roundup(start_frame, urb->interval << 3); in xhci_queue_isoc_tx_prepare()
4427 urb->start_frame = start_frame >> 3; in xhci_queue_isoc_tx_prepare()
4429 start_frame = roundup(start_frame, urb->interval); in xhci_queue_isoc_tx_prepare()
4430 urb->start_frame = start_frame; in xhci_queue_isoc_tx_prepare()
4435 return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index); in xhci_queue_isoc_tx_prepare()
4440 /* Generic function for queueing a command TRB on the command ring.
4445 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
4448 static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in queue_command() argument
4452 int reserved_trbs = xhci->cmd_ring_reserved_trbs; in queue_command()
4455 if ((xhci->xhc_state & XHCI_STATE_DYING) || in queue_command()
4456 (xhci->xhc_state & XHCI_STATE_HALTED)) { in queue_command()
4457 xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); in queue_command()
4458 return -ESHUTDOWN; in queue_command()
4464 ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING, in queue_command()
4467 xhci_err(xhci, "ERR: No room for command on command ring\n"); in queue_command()
4469 xhci_err(xhci, "ERR: Reserved TRB counting for " in queue_command()
4474 cmd->command_trb = xhci->cmd_ring->enqueue; in queue_command()
4477 if (list_empty(&xhci->cmd_list)) { in queue_command()
4478 xhci->current_cmd = cmd; in queue_command()
4479 xhci_mod_cmd_timer(xhci); in queue_command()
4482 list_add_tail(&cmd->cmd_list, &xhci->cmd_list); in queue_command()
4484 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, in queue_command()
4485 field4 | xhci->cmd_ring->cycle_state); in queue_command()
4490 int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_slot_control() argument
4493 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_slot_control()
4498 int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_address_device() argument
4501 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_address_device()
4507 int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_vendor_command() argument
4510 return queue_command(xhci, cmd, field1, field2, field3, field4, false); in xhci_queue_vendor_command()
4514 int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_device() argument
4517 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_device()
4523 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, in xhci_queue_configure_endpoint() argument
4527 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_configure_endpoint()
4534 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_evaluate_context() argument
4537 return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), in xhci_queue_evaluate_context()
4547 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_stop_endpoint() argument
4555 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_stop_endpoint()
4559 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, in xhci_queue_reset_ep() argument
4570 return queue_command(xhci, cmd, 0, 0, 0, in xhci_queue_reset_ep()