xref: /openbmc/u-boot/drivers/usb/host/xhci-ring.c (revision 83d290c56fab2d38cd1ab4c4cc7099559c1d5046)
1  // SPDX-License-Identifier: GPL-2.0+
2  /*
3   * USB HOST XHCI Controller stack
4   *
5   * Based on xHCI host controller driver in linux-kernel
6   * by Sarah Sharp.
7   *
8   * Copyright (C) 2008 Intel Corp.
9   * Author: Sarah Sharp
10   *
11   * Copyright (C) 2013 Samsung Electronics Co.Ltd
12   * Authors: Vivek Gautam <gautam.vivek@samsung.com>
13   *	    Vikas Sajjan <vikas.sajjan@samsung.com>
14   */
15  
16  #include <common.h>
17  #include <asm/byteorder.h>
18  #include <usb.h>
19  #include <asm/unaligned.h>
20  #include <linux/errno.h>
21  
22  #include "xhci.h"
23  
24  /**
25   * Is this TRB a link TRB or was the last TRB the last TRB in this event ring
26   * segment?  I.e. would the updated event TRB pointer step off the end of the
27   * event seg ?
28   *
29   * @param ctrl	Host controller data structure
30   * @param ring	pointer to the ring
31   * @param seg	poniter to the segment to which TRB belongs
32   * @param trb	poniter to the ring trb
33   * @return 1 if this TRB a link TRB else 0
34   */
last_trb(struct xhci_ctrl * ctrl,struct xhci_ring * ring,struct xhci_segment * seg,union xhci_trb * trb)35  static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
36  			struct xhci_segment *seg, union xhci_trb *trb)
37  {
38  	if (ring == ctrl->event_ring)
39  		return trb == &seg->trbs[TRBS_PER_SEGMENT];
40  	else
41  		return TRB_TYPE_LINK_LE32(trb->link.control);
42  }
43  
44  /**
45   * Does this link TRB point to the first segment in a ring,
46   * or was the previous TRB the last TRB on the last segment in the ERST?
47   *
48   * @param ctrl	Host controller data structure
49   * @param ring	pointer to the ring
50   * @param seg	poniter to the segment to which TRB belongs
51   * @param trb	poniter to the ring trb
52   * @return 1 if this TRB is the last TRB on the last segment else 0
53   */
last_trb_on_last_seg(struct xhci_ctrl * ctrl,struct xhci_ring * ring,struct xhci_segment * seg,union xhci_trb * trb)54  static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
55  				 struct xhci_ring *ring,
56  				 struct xhci_segment *seg,
57  				 union xhci_trb *trb)
58  {
59  	if (ring == ctrl->event_ring)
60  		return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
61  			(seg->next == ring->first_seg));
62  	else
63  		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
64  }
65  
66  /**
67   * See Cycle bit rules. SW is the consumer for the event ring only.
68   * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
69   *
70   * If we've just enqueued a TRB that is in the middle of a TD (meaning the
71   * chain bit is set), then set the chain bit in all the following link TRBs.
72   * If we've enqueued the last TRB in a TD, make sure the following link TRBs
73   * have their chain bit cleared (so that each Link TRB is a separate TD).
74   *
75   * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
76   * set, but other sections talk about dealing with the chain bit set.  This was
77   * fixed in the 0.96 specification errata, but we have to assume that all 0.95
78   * xHCI hardware can't handle the chain bit being cleared on a link TRB.
79   *
80   * @param ctrl	Host controller data structure
81   * @param ring	pointer to the ring
82   * @param more_trbs_coming	flag to indicate whether more trbs
83   *				are expected or NOT.
84   *				Will you enqueue more TRBs before calling
85   *				prepare_ring()?
86   * @return none
87   */
inc_enq(struct xhci_ctrl * ctrl,struct xhci_ring * ring,bool more_trbs_coming)88  static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
89  						bool more_trbs_coming)
90  {
91  	u32 chain;
92  	union xhci_trb *next;
93  
94  	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
95  	next = ++(ring->enqueue);
96  
97  	/*
98  	 * Update the dequeue pointer further if that was a link TRB or we're at
99  	 * the end of an event ring segment (which doesn't have link TRBS)
100  	 */
101  	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
102  		if (ring != ctrl->event_ring) {
103  			/*
104  			 * If the caller doesn't plan on enqueueing more
105  			 * TDs before ringing the doorbell, then we
106  			 * don't want to give the link TRB to the
107  			 * hardware just yet.  We'll give the link TRB
108  			 * back in prepare_ring() just before we enqueue
109  			 * the TD at the top of the ring.
110  			 */
111  			if (!chain && !more_trbs_coming)
112  				break;
113  
114  			/*
115  			 * If we're not dealing with 0.95 hardware or
116  			 * isoc rings on AMD 0.96 host,
117  			 * carry over the chain bit of the previous TRB
118  			 * (which may mean the chain bit is cleared).
119  			 */
120  			next->link.control &= cpu_to_le32(~TRB_CHAIN);
121  			next->link.control |= cpu_to_le32(chain);
122  
123  			next->link.control ^= cpu_to_le32(TRB_CYCLE);
124  			xhci_flush_cache((uintptr_t)next,
125  					 sizeof(union xhci_trb));
126  		}
127  		/* Toggle the cycle bit after the last ring segment. */
128  		if (last_trb_on_last_seg(ctrl, ring,
129  					ring->enq_seg, next))
130  			ring->cycle_state = (ring->cycle_state ? 0 : 1);
131  
132  		ring->enq_seg = ring->enq_seg->next;
133  		ring->enqueue = ring->enq_seg->trbs;
134  		next = ring->enqueue;
135  	}
136  }
137  
138  /**
139   * See Cycle bit rules. SW is the consumer for the event ring only.
140   * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
141   *
142   * @param ctrl	Host controller data structure
143   * @param ring	Ring whose Dequeue TRB pointer needs to be incremented.
144   * return none
145   */
inc_deq(struct xhci_ctrl * ctrl,struct xhci_ring * ring)146  static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
147  {
148  	do {
149  		/*
150  		 * Update the dequeue pointer further if that was a link TRB or
151  		 * we're at the end of an event ring segment (which doesn't have
152  		 * link TRBS)
153  		 */
154  		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
155  			if (ring == ctrl->event_ring &&
156  					last_trb_on_last_seg(ctrl, ring,
157  						ring->deq_seg, ring->dequeue)) {
158  				ring->cycle_state = (ring->cycle_state ? 0 : 1);
159  			}
160  			ring->deq_seg = ring->deq_seg->next;
161  			ring->dequeue = ring->deq_seg->trbs;
162  		} else {
163  			ring->dequeue++;
164  		}
165  	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
166  }
167  
168  /**
169   * Generic function for queueing a TRB on a ring.
170   * The caller must have checked to make sure there's room on the ring.
171   *
172   * @param	more_trbs_coming:   Will you enqueue more TRBs before calling
173   *				prepare_ring()?
174   * @param ctrl	Host controller data structure
175   * @param ring	pointer to the ring
176   * @param more_trbs_coming	flag to indicate whether more trbs
177   * @param trb_fields	pointer to trb field array containing TRB contents
178   * @return pointer to the enqueued trb
179   */
queue_trb(struct xhci_ctrl * ctrl,struct xhci_ring * ring,bool more_trbs_coming,unsigned int * trb_fields)180  static struct xhci_generic_trb *queue_trb(struct xhci_ctrl *ctrl,
181  					  struct xhci_ring *ring,
182  					  bool more_trbs_coming,
183  					  unsigned int *trb_fields)
184  {
185  	struct xhci_generic_trb *trb;
186  	int i;
187  
188  	trb = &ring->enqueue->generic;
189  
190  	for (i = 0; i < 4; i++)
191  		trb->field[i] = cpu_to_le32(trb_fields[i]);
192  
193  	xhci_flush_cache((uintptr_t)trb, sizeof(struct xhci_generic_trb));
194  
195  	inc_enq(ctrl, ring, more_trbs_coming);
196  
197  	return trb;
198  }
199  
200  /**
201   * Does various checks on the endpoint ring, and makes it ready
202   * to queue num_trbs.
203   *
204   * @param ctrl		Host controller data structure
205   * @param ep_ring	pointer to the EP Transfer Ring
206   * @param ep_state	State of the End Point
207   * @return error code in case of invalid ep_state, 0 on success
208   */
prepare_ring(struct xhci_ctrl * ctrl,struct xhci_ring * ep_ring,u32 ep_state)209  static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
210  							u32 ep_state)
211  {
212  	union xhci_trb *next = ep_ring->enqueue;
213  
214  	/* Make sure the endpoint has been added to xHC schedule */
215  	switch (ep_state) {
216  	case EP_STATE_DISABLED:
217  		/*
218  		 * USB core changed config/interfaces without notifying us,
219  		 * or hardware is reporting the wrong state.
220  		 */
221  		puts("WARN urb submitted to disabled ep\n");
222  		return -ENOENT;
223  	case EP_STATE_ERROR:
224  		puts("WARN waiting for error on ep to be cleared\n");
225  		return -EINVAL;
226  	case EP_STATE_HALTED:
227  		puts("WARN halted endpoint, queueing URB anyway.\n");
228  	case EP_STATE_STOPPED:
229  	case EP_STATE_RUNNING:
230  		debug("EP STATE RUNNING.\n");
231  		break;
232  	default:
233  		puts("ERROR unknown endpoint state for ep\n");
234  		return -EINVAL;
235  	}
236  
237  	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
238  		/*
239  		 * If we're not dealing with 0.95 hardware or isoc rings
240  		 * on AMD 0.96 host, clear the chain bit.
241  		 */
242  		next->link.control &= cpu_to_le32(~TRB_CHAIN);
243  
244  		next->link.control ^= cpu_to_le32(TRB_CYCLE);
245  
246  		xhci_flush_cache((uintptr_t)next, sizeof(union xhci_trb));
247  
248  		/* Toggle the cycle bit after the last ring segment. */
249  		if (last_trb_on_last_seg(ctrl, ep_ring,
250  					ep_ring->enq_seg, next))
251  			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
252  		ep_ring->enq_seg = ep_ring->enq_seg->next;
253  		ep_ring->enqueue = ep_ring->enq_seg->trbs;
254  		next = ep_ring->enqueue;
255  	}
256  
257  	return 0;
258  }
259  
260  /**
261   * Generic function for queueing a command TRB on the command ring.
262   * Check to make sure there's room on the command ring for one command TRB.
263   *
264   * @param ctrl		Host controller data structure
265   * @param ptr		Pointer address to write in the first two fields (opt.)
266   * @param slot_id	Slot ID to encode in the flags field (opt.)
267   * @param ep_index	Endpoint index to encode in the flags field (opt.)
268   * @param cmd		Command type to enqueue
269   * @return none
270   */
xhci_queue_command(struct xhci_ctrl * ctrl,u8 * ptr,u32 slot_id,u32 ep_index,trb_type cmd)271  void xhci_queue_command(struct xhci_ctrl *ctrl, u8 *ptr, u32 slot_id,
272  			u32 ep_index, trb_type cmd)
273  {
274  	u32 fields[4];
275  	u64 val_64 = (uintptr_t)ptr;
276  
277  	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));
278  
279  	fields[0] = lower_32_bits(val_64);
280  	fields[1] = upper_32_bits(val_64);
281  	fields[2] = 0;
282  	fields[3] = TRB_TYPE(cmd) | SLOT_ID_FOR_TRB(slot_id) |
283  		    ctrl->cmd_ring->cycle_state;
284  
285  	/*
286  	 * Only 'reset endpoint', 'stop endpoint' and 'set TR dequeue pointer'
287  	 * commands need endpoint id encoded.
288  	 */
289  	if (cmd >= TRB_RESET_EP && cmd <= TRB_SET_DEQ)
290  		fields[3] |= EP_ID_FOR_TRB(ep_index);
291  
292  	queue_trb(ctrl, ctrl->cmd_ring, false, fields);
293  
294  	/* Ring the command ring doorbell */
295  	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
296  }
297  
298  /**
299   * The TD size is the number of bytes remaining in the TD (including this TRB),
300   * right shifted by 10.
301   * It must fit in bits 21:17, so it can't be bigger than 31.
302   *
303   * @param remainder	remaining packets to be sent
304   * @return remainder if remainder is less than max else max
305   */
xhci_td_remainder(unsigned int remainder)306  static u32 xhci_td_remainder(unsigned int remainder)
307  {
308  	u32 max = (1 << (21 - 17 + 1)) - 1;
309  
310  	if ((remainder >> 10) >= max)
311  		return max << 17;
312  	else
313  		return (remainder >> 10) << 17;
314  }
315  
316  /**
317   * Finds out the remanining packets to be sent
318   *
319   * @param running_total	total size sent so far
320   * @param trb_buff_len	length of the TRB Buffer
321   * @param total_packet_count	total packet count
322   * @param maxpacketsize		max packet size of current pipe
323   * @param num_trbs_left		number of TRBs left to be processed
324   * @return 0 if running_total or trb_buff_len is 0, else remainder
325   */
xhci_v1_0_td_remainder(int running_total,int trb_buff_len,unsigned int total_packet_count,int maxpacketsize,unsigned int num_trbs_left)326  static u32 xhci_v1_0_td_remainder(int running_total,
327  				int trb_buff_len,
328  				unsigned int total_packet_count,
329  				int maxpacketsize,
330  				unsigned int num_trbs_left)
331  {
332  	int packets_transferred;
333  
334  	/* One TRB with a zero-length data packet. */
335  	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
336  		return 0;
337  
338  	/*
339  	 * All the TRB queueing functions don't count the current TRB in
340  	 * running_total.
341  	 */
342  	packets_transferred = (running_total + trb_buff_len) / maxpacketsize;
343  
344  	if ((total_packet_count - packets_transferred) > 31)
345  		return 31 << 17;
346  	return (total_packet_count - packets_transferred) << 17;
347  }
348  
349  /**
350   * Ring the doorbell of the End Point
351   *
352   * @param udev		pointer to the USB device structure
353   * @param ep_index	index of the endpoint
354   * @param start_cycle	cycle flag of the first TRB
355   * @param start_trb	pionter to the first TRB
356   * @return none
357   */
giveback_first_trb(struct usb_device * udev,int ep_index,int start_cycle,struct xhci_generic_trb * start_trb)358  static void giveback_first_trb(struct usb_device *udev, int ep_index,
359  				int start_cycle,
360  				struct xhci_generic_trb *start_trb)
361  {
362  	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
363  
364  	/*
365  	 * Pass all the TRBs to the hardware at once and make sure this write
366  	 * isn't reordered.
367  	 */
368  	if (start_cycle)
369  		start_trb->field[3] |= cpu_to_le32(start_cycle);
370  	else
371  		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
372  
373  	xhci_flush_cache((uintptr_t)start_trb, sizeof(struct xhci_generic_trb));
374  
375  	/* Ringing EP doorbell here */
376  	xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
377  				DB_VALUE(ep_index, 0));
378  
379  	return;
380  }
381  
382  /**** POLLING mechanism for XHCI ****/
383  
384  /**
385   * Finalizes a handled event TRB by advancing our dequeue pointer and giving
386   * the TRB back to the hardware for recycling. Must call this exactly once at
387   * the end of each event handler, and not touch the TRB again afterwards.
388   *
389   * @param ctrl	Host controller data structure
390   * @return none
391   */
xhci_acknowledge_event(struct xhci_ctrl * ctrl)392  void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
393  {
394  	/* Advance our dequeue pointer to the next event */
395  	inc_deq(ctrl, ctrl->event_ring);
396  
397  	/* Inform the hardware */
398  	xhci_writeq(&ctrl->ir_set->erst_dequeue,
399  		(uintptr_t)ctrl->event_ring->dequeue | ERST_EHB);
400  }
401  
402  /**
403   * Checks if there is a new event to handle on the event ring.
404   *
405   * @param ctrl	Host controller data structure
406   * @return 0 if failure else 1 on success
407   */
event_ready(struct xhci_ctrl * ctrl)408  static int event_ready(struct xhci_ctrl *ctrl)
409  {
410  	union xhci_trb *event;
411  
412  	xhci_inval_cache((uintptr_t)ctrl->event_ring->dequeue,
413  			 sizeof(union xhci_trb));
414  
415  	event = ctrl->event_ring->dequeue;
416  
417  	/* Does the HC or OS own the TRB? */
418  	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
419  		ctrl->event_ring->cycle_state)
420  		return 0;
421  
422  	return 1;
423  }
424  
425  /**
426   * Waits for a specific type of event and returns it. Discards unexpected
427   * events. Caller *must* call xhci_acknowledge_event() after it is finished
428   * processing the event, and must not access the returned pointer afterwards.
429   *
430   * @param ctrl		Host controller data structure
431   * @param expected	TRB type expected from Event TRB
432   * @return pointer to event trb
433   */
xhci_wait_for_event(struct xhci_ctrl * ctrl,trb_type expected)434  union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
435  {
436  	trb_type type;
437  	unsigned long ts = get_timer(0);
438  
439  	do {
440  		union xhci_trb *event = ctrl->event_ring->dequeue;
441  
442  		if (!event_ready(ctrl))
443  			continue;
444  
445  		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
446  		if (type == expected)
447  			return event;
448  
449  		if (type == TRB_PORT_STATUS)
450  		/* TODO: remove this once enumeration has been reworked */
451  			/*
452  			 * Port status change events always have a
453  			 * successful completion code
454  			 */
455  			BUG_ON(GET_COMP_CODE(
456  				le32_to_cpu(event->generic.field[2])) !=
457  								COMP_SUCCESS);
458  		else
459  			printf("Unexpected XHCI event TRB, skipping... "
460  				"(%08x %08x %08x %08x)\n",
461  				le32_to_cpu(event->generic.field[0]),
462  				le32_to_cpu(event->generic.field[1]),
463  				le32_to_cpu(event->generic.field[2]),
464  				le32_to_cpu(event->generic.field[3]));
465  
466  		xhci_acknowledge_event(ctrl);
467  	} while (get_timer(ts) < XHCI_TIMEOUT);
468  
469  	if (expected == TRB_TRANSFER)
470  		return NULL;
471  
472  	printf("XHCI timeout on event type %d... cannot recover.\n", expected);
473  	BUG();
474  }
475  
476  /*
477   * Stops transfer processing for an endpoint and throws away all unprocessed
478   * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next
479   * xhci_bulk_tx/xhci_ctrl_tx on this enpoint will add new transfers there and
480   * ring the doorbell, causing this endpoint to start working again.
481   * (Careful: This will BUG() when there was no transfer in progress. Shouldn't
482   * happen in practice for current uses and is too complicated to fix right now.)
483   */
abort_td(struct usb_device * udev,int ep_index)484  static void abort_td(struct usb_device *udev, int ep_index)
485  {
486  	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
487  	struct xhci_ring *ring =  ctrl->devs[udev->slot_id]->eps[ep_index].ring;
488  	union xhci_trb *event;
489  	u32 field;
490  
491  	xhci_queue_command(ctrl, NULL, udev->slot_id, ep_index, TRB_STOP_RING);
492  
493  	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
494  	field = le32_to_cpu(event->trans_event.flags);
495  	BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
496  	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
497  	BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len
498  		!= COMP_STOP)));
499  	xhci_acknowledge_event(ctrl);
500  
501  	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
502  	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
503  		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
504  		event->event_cmd.status)) != COMP_SUCCESS);
505  	xhci_acknowledge_event(ctrl);
506  
507  	xhci_queue_command(ctrl, (void *)((uintptr_t)ring->enqueue |
508  		ring->cycle_state), udev->slot_id, ep_index, TRB_SET_DEQ);
509  	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
510  	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
511  		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
512  		event->event_cmd.status)) != COMP_SUCCESS);
513  	xhci_acknowledge_event(ctrl);
514  }
515  
record_transfer_result(struct usb_device * udev,union xhci_trb * event,int length)516  static void record_transfer_result(struct usb_device *udev,
517  				   union xhci_trb *event, int length)
518  {
519  	udev->act_len = min(length, length -
520  		(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));
521  
522  	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
523  	case COMP_SUCCESS:
524  		BUG_ON(udev->act_len != length);
525  		/* fallthrough */
526  	case COMP_SHORT_TX:
527  		udev->status = 0;
528  		break;
529  	case COMP_STALL:
530  		udev->status = USB_ST_STALLED;
531  		break;
532  	case COMP_DB_ERR:
533  	case COMP_TRB_ERR:
534  		udev->status = USB_ST_BUF_ERR;
535  		break;
536  	case COMP_BABBLE:
537  		udev->status = USB_ST_BABBLE_DET;
538  		break;
539  	default:
540  		udev->status = 0x80;  /* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */
541  	}
542  }
543  
544  /**** Bulk and Control transfer methods ****/
545  /**
546   * Queues up the BULK Request
547   *
548   * @param udev		pointer to the USB device structure
549   * @param pipe		contains the DIR_IN or OUT , devnum
550   * @param length	length of the buffer
551   * @param buffer	buffer to be read/written based on the request
552   * @return returns 0 if successful else -1 on failure
553   */
xhci_bulk_tx(struct usb_device * udev,unsigned long pipe,int length,void * buffer)554  int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
555  			int length, void *buffer)
556  {
557  	int num_trbs = 0;
558  	struct xhci_generic_trb *start_trb;
559  	bool first_trb = false;
560  	int start_cycle;
561  	u32 field = 0;
562  	u32 length_field = 0;
563  	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
564  	int slot_id = udev->slot_id;
565  	int ep_index;
566  	struct xhci_virt_device *virt_dev;
567  	struct xhci_ep_ctx *ep_ctx;
568  	struct xhci_ring *ring;		/* EP transfer ring */
569  	union xhci_trb *event;
570  
571  	int running_total, trb_buff_len;
572  	unsigned int total_packet_count;
573  	int maxpacketsize;
574  	u64 addr;
575  	int ret;
576  	u32 trb_fields[4];
577  	u64 val_64 = (uintptr_t)buffer;
578  
579  	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
580  		udev, pipe, buffer, length);
581  
582  	ep_index = usb_pipe_ep_index(pipe);
583  	virt_dev = ctrl->devs[slot_id];
584  
585  	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
586  			 virt_dev->out_ctx->size);
587  
588  	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);
589  
590  	ring = virt_dev->eps[ep_index].ring;
591  	/*
592  	 * How much data is (potentially) left before the 64KB boundary?
593  	 * XHCI Spec puts restriction( TABLE 49 and 6.4.1 section of XHCI Spec)
594  	 * that the buffer should not span 64KB boundary. if so
595  	 * we send request in more than 1 TRB by chaining them.
596  	 */
597  	running_total = TRB_MAX_BUFF_SIZE -
598  			(lower_32_bits(val_64) & (TRB_MAX_BUFF_SIZE - 1));
599  	trb_buff_len = running_total;
600  	running_total &= TRB_MAX_BUFF_SIZE - 1;
601  
602  	/*
603  	 * If there's some data on this 64KB chunk, or we have to send a
604  	 * zero-length transfer, we need at least one TRB
605  	 */
606  	if (running_total != 0 || length == 0)
607  		num_trbs++;
608  
609  	/* How many more 64KB chunks to transfer, how many more TRBs? */
610  	while (running_total < length) {
611  		num_trbs++;
612  		running_total += TRB_MAX_BUFF_SIZE;
613  	}
614  
615  	/*
616  	 * XXX: Calling routine prepare_ring() called in place of
617  	 * prepare_trasfer() as there in 'Linux' since we are not
618  	 * maintaining multiple TDs/transfer at the same time.
619  	 */
620  	ret = prepare_ring(ctrl, ring,
621  			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
622  	if (ret < 0)
623  		return ret;
624  
625  	/*
626  	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
627  	 * until we've finished creating all the other TRBs.  The ring's cycle
628  	 * state may change as we enqueue the other TRBs, so save it too.
629  	 */
630  	start_trb = &ring->enqueue->generic;
631  	start_cycle = ring->cycle_state;
632  
633  	running_total = 0;
634  	maxpacketsize = usb_maxpacket(udev, pipe);
635  
636  	total_packet_count = DIV_ROUND_UP(length, maxpacketsize);
637  
638  	/* How much data is in the first TRB? */
639  	/*
640  	 * How much data is (potentially) left before the 64KB boundary?
641  	 * XHCI Spec puts restriction( TABLE 49 and 6.4.1 section of XHCI Spec)
642  	 * that the buffer should not span 64KB boundary. if so
643  	 * we send request in more than 1 TRB by chaining them.
644  	 */
645  	addr = val_64;
646  
647  	if (trb_buff_len > length)
648  		trb_buff_len = length;
649  
650  	first_trb = true;
651  
652  	/* flush the buffer before use */
653  	xhci_flush_cache((uintptr_t)buffer, length);
654  
655  	/* Queue the first TRB, even if it's zero-length */
656  	do {
657  		u32 remainder = 0;
658  		field = 0;
659  		/* Don't change the cycle bit of the first TRB until later */
660  		if (first_trb) {
661  			first_trb = false;
662  			if (start_cycle == 0)
663  				field |= TRB_CYCLE;
664  		} else {
665  			field |= ring->cycle_state;
666  		}
667  
668  		/*
669  		 * Chain all the TRBs together; clear the chain bit in the last
670  		 * TRB to indicate it's the last TRB in the chain.
671  		 */
672  		if (num_trbs > 1)
673  			field |= TRB_CHAIN;
674  		else
675  			field |= TRB_IOC;
676  
677  		/* Only set interrupt on short packet for IN endpoints */
678  		if (usb_pipein(pipe))
679  			field |= TRB_ISP;
680  
681  		/* Set the TRB length, TD size, and interrupter fields. */
682  		if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) < 0x100)
683  			remainder = xhci_td_remainder(length - running_total);
684  		else
685  			remainder = xhci_v1_0_td_remainder(running_total,
686  							   trb_buff_len,
687  							   total_packet_count,
688  							   maxpacketsize,
689  							   num_trbs - 1);
690  
691  		length_field = ((trb_buff_len & TRB_LEN_MASK) |
692  				remainder |
693  				((0 & TRB_INTR_TARGET_MASK) <<
694  				TRB_INTR_TARGET_SHIFT));
695  
696  		trb_fields[0] = lower_32_bits(addr);
697  		trb_fields[1] = upper_32_bits(addr);
698  		trb_fields[2] = length_field;
699  		trb_fields[3] = field | (TRB_NORMAL << TRB_TYPE_SHIFT);
700  
701  		queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);
702  
703  		--num_trbs;
704  
705  		running_total += trb_buff_len;
706  
707  		/* Calculate length for next transfer */
708  		addr += trb_buff_len;
709  		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);
710  	} while (running_total < length);
711  
712  	giveback_first_trb(udev, ep_index, start_cycle, start_trb);
713  
714  	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
715  	if (!event) {
716  		debug("XHCI bulk transfer timed out, aborting...\n");
717  		abort_td(udev, ep_index);
718  		udev->status = USB_ST_NAK_REC;  /* closest thing to a timeout */
719  		udev->act_len = 0;
720  		return -ETIMEDOUT;
721  	}
722  	field = le32_to_cpu(event->trans_event.flags);
723  
724  	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
725  	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
726  	BUG_ON(*(void **)(uintptr_t)le64_to_cpu(event->trans_event.buffer) -
727  		buffer > (size_t)length);
728  
729  	record_transfer_result(udev, event, length);
730  	xhci_acknowledge_event(ctrl);
731  	xhci_inval_cache((uintptr_t)buffer, length);
732  
733  	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
734  }
735  
736  /**
737   * Queues up the Control Transfer Request
738   *
739   * @param udev	pointer to the USB device structure
740   * @param pipe		contains the DIR_IN or OUT , devnum
741   * @param req		request type
742   * @param length	length of the buffer
743   * @param buffer	buffer to be read/written based on the request
744   * @return returns 0 if successful else error code on failure
745   */
xhci_ctrl_tx(struct usb_device * udev,unsigned long pipe,struct devrequest * req,int length,void * buffer)746  int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
747  			struct devrequest *req,	int length,
748  			void *buffer)
749  {
750  	int ret;
751  	int start_cycle;
752  	int num_trbs;
753  	u32 field;
754  	u32 length_field;
755  	u64 buf_64 = 0;
756  	struct xhci_generic_trb *start_trb;
757  	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
758  	int slot_id = udev->slot_id;
759  	int ep_index;
760  	u32 trb_fields[4];
761  	struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
762  	struct xhci_ring *ep_ring;
763  	union xhci_trb *event;
764  
765  	debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
766  		req->request, req->request,
767  		req->requesttype, req->requesttype,
768  		le16_to_cpu(req->value), le16_to_cpu(req->value),
769  		le16_to_cpu(req->index));
770  
771  	ep_index = usb_pipe_ep_index(pipe);
772  
773  	ep_ring = virt_dev->eps[ep_index].ring;
774  
775  	/*
776  	 * Check to see if the max packet size for the default control
777  	 * endpoint changed during FS device enumeration
778  	 */
779  	if (udev->speed == USB_SPEED_FULL) {
780  		ret = xhci_check_maxpacket(udev);
781  		if (ret < 0)
782  			return ret;
783  	}
784  
785  	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
786  			 virt_dev->out_ctx->size);
787  
788  	struct xhci_ep_ctx *ep_ctx = NULL;
789  	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);
790  
791  	/* 1 TRB for setup, 1 for status */
792  	num_trbs = 2;
793  	/*
794  	 * Don't need to check if we need additional event data and normal TRBs,
795  	 * since data in control transfers will never get bigger than 16MB
796  	 * XXX: can we get a buffer that crosses 64KB boundaries?
797  	 */
798  
799  	if (length > 0)
800  		num_trbs++;
801  	/*
802  	 * XXX: Calling routine prepare_ring() called in place of
803  	 * prepare_trasfer() as there in 'Linux' since we are not
804  	 * maintaining multiple TDs/transfer at the same time.
805  	 */
806  	ret = prepare_ring(ctrl, ep_ring,
807  				le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
808  
809  	if (ret < 0)
810  		return ret;
811  
812  	/*
813  	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
814  	 * until we've finished creating all the other TRBs.  The ring's cycle
815  	 * state may change as we enqueue the other TRBs, so save it too.
816  	 */
817  	start_trb = &ep_ring->enqueue->generic;
818  	start_cycle = ep_ring->cycle_state;
819  
820  	debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);
821  
822  	/* Queue setup TRB - see section 6.4.1.2.1 */
823  	/* FIXME better way to translate setup_packet into two u32 fields? */
824  	field = 0;
825  	field |= TRB_IDT | (TRB_SETUP << TRB_TYPE_SHIFT);
826  	if (start_cycle == 0)
827  		field |= 0x1;
828  
829  	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
830  	if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) == 0x100) {
831  		if (length > 0) {
832  			if (req->requesttype & USB_DIR_IN)
833  				field |= (TRB_DATA_IN << TRB_TX_TYPE_SHIFT);
834  			else
835  				field |= (TRB_DATA_OUT << TRB_TX_TYPE_SHIFT);
836  		}
837  	}
838  
839  	debug("req->requesttype = %d, req->request = %d,"
840  		"le16_to_cpu(req->value) = %d,"
841  		"le16_to_cpu(req->index) = %d,"
842  		"le16_to_cpu(req->length) = %d\n",
843  		req->requesttype, req->request, le16_to_cpu(req->value),
844  		le16_to_cpu(req->index), le16_to_cpu(req->length));
845  
846  	trb_fields[0] = req->requesttype | req->request << 8 |
847  				le16_to_cpu(req->value) << 16;
848  	trb_fields[1] = le16_to_cpu(req->index) |
849  			le16_to_cpu(req->length) << 16;
850  	/* TRB_LEN | (TRB_INTR_TARGET) */
851  	trb_fields[2] = (8 | ((0 & TRB_INTR_TARGET_MASK) <<
852  			TRB_INTR_TARGET_SHIFT));
853  	/* Immediate data in pointer */
854  	trb_fields[3] = field;
855  	queue_trb(ctrl, ep_ring, true, trb_fields);
856  
857  	/* Re-initializing field to zero */
858  	field = 0;
859  	/* If there's data, queue data TRBs */
860  	/* Only set interrupt on short packet for IN endpoints */
861  	if (usb_pipein(pipe))
862  		field = TRB_ISP | (TRB_DATA << TRB_TYPE_SHIFT);
863  	else
864  		field = (TRB_DATA << TRB_TYPE_SHIFT);
865  
866  	length_field = (length & TRB_LEN_MASK) | xhci_td_remainder(length) |
867  			((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT);
868  	debug("length_field = %d, length = %d,"
869  		"xhci_td_remainder(length) = %d , TRB_INTR_TARGET(0) = %d\n",
870  		length_field, (length & TRB_LEN_MASK),
871  		xhci_td_remainder(length), 0);
872  
873  	if (length > 0) {
874  		if (req->requesttype & USB_DIR_IN)
875  			field |= TRB_DIR_IN;
876  		buf_64 = (uintptr_t)buffer;
877  
878  		trb_fields[0] = lower_32_bits(buf_64);
879  		trb_fields[1] = upper_32_bits(buf_64);
880  		trb_fields[2] = length_field;
881  		trb_fields[3] = field | ep_ring->cycle_state;
882  
883  		xhci_flush_cache((uintptr_t)buffer, length);
884  		queue_trb(ctrl, ep_ring, true, trb_fields);
885  	}
886  
887  	/*
888  	 * Queue status TRB -
889  	 * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3
890  	 */
891  
892  	/* If the device sent data, the status stage is an OUT transfer */
893  	field = 0;
894  	if (length > 0 && req->requesttype & USB_DIR_IN)
895  		field = 0;
896  	else
897  		field = TRB_DIR_IN;
898  
899  	trb_fields[0] = 0;
900  	trb_fields[1] = 0;
901  	trb_fields[2] = ((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT);
902  		/* Event on completion */
903  	trb_fields[3] = field | TRB_IOC |
904  			(TRB_STATUS << TRB_TYPE_SHIFT) |
905  			ep_ring->cycle_state;
906  
907  	queue_trb(ctrl, ep_ring, false, trb_fields);
908  
909  	giveback_first_trb(udev, ep_index, start_cycle, start_trb);
910  
911  	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
912  	if (!event)
913  		goto abort;
914  	field = le32_to_cpu(event->trans_event.flags);
915  
916  	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
917  	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
918  
919  	record_transfer_result(udev, event, length);
920  	xhci_acknowledge_event(ctrl);
921  
922  	/* Invalidate buffer to make it available to usb-core */
923  	if (length > 0)
924  		xhci_inval_cache((uintptr_t)buffer, length);
925  
926  	if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
927  			== COMP_SHORT_TX) {
928  		/* Short data stage, clear up additional status stage event */
929  		event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
930  		if (!event)
931  			goto abort;
932  		BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
933  		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
934  		xhci_acknowledge_event(ctrl);
935  	}
936  
937  	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
938  
939  abort:
940  	debug("XHCI control transfer timed out, aborting...\n");
941  	abort_td(udev, ep_index);
942  	udev->status = USB_ST_NAK_REC;
943  	udev->act_len = 0;
944  	return -ETIMEDOUT;
945  }
946