xref: /openbmc/linux/drivers/usb/host/xhci-mem.c (revision 1a340825)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * xHCI host controller driver
4  *
5  * Copyright (C) 2008 Intel Corp.
6  *
7  * Author: Sarah Sharp
8  * Some code borrowed from the Linux EHCI driver.
9  */
10 
11 #include <linux/usb.h>
12 #include <linux/pci.h>
13 #include <linux/slab.h>
14 #include <linux/dmapool.h>
15 #include <linux/dma-mapping.h>
16 
17 #include "xhci.h"
18 #include "xhci-trace.h"
19 #include "xhci-debugfs.h"
20 
21 /*
22  * Allocates a generic ring segment from the ring pool, sets the dma address,
23  * initializes the segment to zero, and sets the private next pointer to NULL.
24  *
25  * Section 4.11.1.1:
26  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
27  */
28 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
29 					       unsigned int cycle_state,
30 					       unsigned int max_packet,
31 					       gfp_t flags)
32 {
33 	struct xhci_segment *seg;
34 	dma_addr_t	dma;
35 	int		i;
36 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
37 
38 	seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
39 	if (!seg)
40 		return NULL;
41 
42 	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
43 	if (!seg->trbs) {
44 		kfree(seg);
45 		return NULL;
46 	}
47 
48 	if (max_packet) {
49 		seg->bounce_buf = kzalloc_node(max_packet, flags,
50 					dev_to_node(dev));
51 		if (!seg->bounce_buf) {
52 			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
53 			kfree(seg);
54 			return NULL;
55 		}
56 	}
57 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
58 	if (cycle_state == 0) {
59 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
60 			seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
61 	}
62 	seg->dma = dma;
63 	seg->next = NULL;
64 
65 	return seg;
66 }
67 
68 static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
69 {
70 	if (seg->trbs) {
71 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
72 		seg->trbs = NULL;
73 	}
74 	kfree(seg->bounce_buf);
75 	kfree(seg);
76 }
77 
78 static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
79 				struct xhci_segment *first)
80 {
81 	struct xhci_segment *seg;
82 
83 	seg = first->next;
84 	while (seg != first) {
85 		struct xhci_segment *next = seg->next;
86 		xhci_segment_free(xhci, seg);
87 		seg = next;
88 	}
89 	xhci_segment_free(xhci, first);
90 }
91 
92 /*
93  * Make the prev segment point to the next segment.
94  *
95  * Change the last TRB in the prev segment to be a Link TRB which points to the
96  * DMA address of the next segment.  The caller needs to set any Link TRB
97  * related flags, such as End TRB, Toggle Cycle, and no snoop.
98  */
99 static void xhci_link_segments(struct xhci_segment *prev,
100 			       struct xhci_segment *next,
101 			       enum xhci_ring_type type, bool chain_links)
102 {
103 	u32 val;
104 
105 	if (!prev || !next)
106 		return;
107 	prev->next = next;
108 	if (type != TYPE_EVENT) {
109 		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
110 			cpu_to_le64(next->dma);
111 
112 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
113 		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
114 		val &= ~TRB_TYPE_BITMASK;
115 		val |= TRB_TYPE(TRB_LINK);
116 		if (chain_links)
117 			val |= TRB_CHAIN;
118 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
119 	}
120 }
121 
122 /*
123  * Link the ring to the new segments.
124  * Set Toggle Cycle for the new ring if needed.
125  */
126 static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
127 		struct xhci_segment *first, struct xhci_segment *last,
128 		unsigned int num_segs)
129 {
130 	struct xhci_segment *next;
131 	bool chain_links;
132 
133 	if (!ring || !first || !last)
134 		return;
135 
136 	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
137 	chain_links = !!(xhci_link_trb_quirk(xhci) ||
138 			 (ring->type == TYPE_ISOC &&
139 			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
140 
141 	next = ring->enq_seg->next;
142 	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
143 	xhci_link_segments(last, next, ring->type, chain_links);
144 	ring->num_segs += num_segs;
145 	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
146 
147 	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
148 		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
149 			&= ~cpu_to_le32(LINK_TOGGLE);
150 		last->trbs[TRBS_PER_SEGMENT-1].link.control
151 			|= cpu_to_le32(LINK_TOGGLE);
152 		ring->last_seg = last;
153 	}
154 }
155 
156 /*
157  * We need a radix tree for mapping physical addresses of TRBs to which stream
158  * ID they belong to.  We need to do this because the host controller won't tell
159  * us which stream ring the TRB came from.  We could store the stream ID in an
160  * event data TRB, but that doesn't help us for the cancellation case, since the
161  * endpoint may stop before it reaches that event data TRB.
162  *
163  * The radix tree maps the upper portion of the TRB DMA address to a ring
164  * segment that has the same upper portion of DMA addresses.  For example, say I
165  * have segments of size 1KB, that are always 1KB aligned.  A segment may
166  * start at 0x10c91000 and end at 0x10c913f0.  If I use the upper 10 bits, the
167  * key to the stream ID is 0x43244.  I can use the DMA address of the TRB to
168  * pass the radix tree a key to get the right stream ID:
169  *
170  *	0x10c90fff >> 10 = 0x43243
171  *	0x10c912c0 >> 10 = 0x43244
172  *	0x10c91400 >> 10 = 0x43245
173  *
174  * Obviously, only those TRBs with DMA addresses that are within the segment
175  * will make the radix tree return the stream ID for that ring.
176  *
177  * Caveats for the radix tree:
178  *
179  * The radix tree uses an unsigned long as a key pair.  On 32-bit systems, an
180  * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
181  * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
182  * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
183  * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
184  * extended systems (where the DMA address can be bigger than 32-bits),
185  * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
186  */
187 static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
188 		struct xhci_ring *ring,
189 		struct xhci_segment *seg,
190 		gfp_t mem_flags)
191 {
192 	unsigned long key;
193 	int ret;
194 
195 	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
196 	/* Skip any segments that were already added. */
197 	if (radix_tree_lookup(trb_address_map, key))
198 		return 0;
199 
200 	ret = radix_tree_maybe_preload(mem_flags);
201 	if (ret)
202 		return ret;
203 	ret = radix_tree_insert(trb_address_map,
204 			key, ring);
205 	radix_tree_preload_end();
206 	return ret;
207 }
208 
209 static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
210 		struct xhci_segment *seg)
211 {
212 	unsigned long key;
213 
214 	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
215 	if (radix_tree_lookup(trb_address_map, key))
216 		radix_tree_delete(trb_address_map, key);
217 }
218 
219 static int xhci_update_stream_segment_mapping(
220 		struct radix_tree_root *trb_address_map,
221 		struct xhci_ring *ring,
222 		struct xhci_segment *first_seg,
223 		struct xhci_segment *last_seg,
224 		gfp_t mem_flags)
225 {
226 	struct xhci_segment *seg;
227 	struct xhci_segment *failed_seg;
228 	int ret;
229 
230 	if (WARN_ON_ONCE(trb_address_map == NULL))
231 		return 0;
232 
233 	seg = first_seg;
234 	do {
235 		ret = xhci_insert_segment_mapping(trb_address_map,
236 				ring, seg, mem_flags);
237 		if (ret)
238 			goto remove_streams;
239 		if (seg == last_seg)
240 			return 0;
241 		seg = seg->next;
242 	} while (seg != first_seg);
243 
244 	return 0;
245 
246 remove_streams:
247 	failed_seg = seg;
248 	seg = first_seg;
249 	do {
250 		xhci_remove_segment_mapping(trb_address_map, seg);
251 		if (seg == failed_seg)
252 			return ret;
253 		seg = seg->next;
254 	} while (seg != first_seg);
255 
256 	return ret;
257 }
258 
259 static void xhci_remove_stream_mapping(struct xhci_ring *ring)
260 {
261 	struct xhci_segment *seg;
262 
263 	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
264 		return;
265 
266 	seg = ring->first_seg;
267 	do {
268 		xhci_remove_segment_mapping(ring->trb_address_map, seg);
269 		seg = seg->next;
270 	} while (seg != ring->first_seg);
271 }
272 
273 static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
274 {
275 	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
276 			ring->first_seg, ring->last_seg, mem_flags);
277 }
278 
279 /* XXX: Do we need the hcd structure in all these functions? */
280 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
281 {
282 	if (!ring)
283 		return;
284 
285 	trace_xhci_ring_free(ring);
286 
287 	if (ring->first_seg) {
288 		if (ring->type == TYPE_STREAM)
289 			xhci_remove_stream_mapping(ring);
290 		xhci_free_segments_for_ring(xhci, ring->first_seg);
291 	}
292 
293 	kfree(ring);
294 }
295 
296 void xhci_initialize_ring_info(struct xhci_ring *ring,
297 			       unsigned int cycle_state)
298 {
299 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
300 	ring->enqueue = ring->first_seg->trbs;
301 	ring->enq_seg = ring->first_seg;
302 	ring->dequeue = ring->enqueue;
303 	ring->deq_seg = ring->first_seg;
304 	/* The ring is initialized to 0. The producer must write 1 to the cycle
305 	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
306 	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
307 	 *
308 	 * New rings are initialized with cycle state equal to 1; if we are
309 	 * handling ring expansion, set the cycle state equal to the old ring.
310 	 */
311 	ring->cycle_state = cycle_state;
312 
313 	/*
314 	 * Each segment has a link TRB, and leave an extra TRB for SW
315 	 * accounting purpose
316 	 */
317 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
318 }
319 
320 /* Allocate segments and link them for a ring */
321 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
322 		struct xhci_segment **first, struct xhci_segment **last,
323 		unsigned int num_segs, unsigned int cycle_state,
324 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
325 {
326 	struct xhci_segment *prev;
327 	bool chain_links;
328 
329 	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
330 	chain_links = !!(xhci_link_trb_quirk(xhci) ||
331 			 (type == TYPE_ISOC &&
332 			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
333 
334 	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
335 	if (!prev)
336 		return -ENOMEM;
337 	num_segs--;
338 
339 	*first = prev;
340 	while (num_segs > 0) {
341 		struct xhci_segment	*next;
342 
343 		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
344 		if (!next) {
345 			prev = *first;
346 			while (prev) {
347 				next = prev->next;
348 				xhci_segment_free(xhci, prev);
349 				prev = next;
350 			}
351 			return -ENOMEM;
352 		}
353 		xhci_link_segments(prev, next, type, chain_links);
354 
355 		prev = next;
356 		num_segs--;
357 	}
358 	xhci_link_segments(prev, *first, type, chain_links);
359 	*last = prev;
360 
361 	return 0;
362 }
363 
364 /*
365  * Create a new ring with zero or more segments.
366  *
367  * Link each segment together into a ring.
368  * Set the end flag and the cycle toggle bit on the last segment.
369  * See section 4.9.1 and figures 15 and 16.
370  */
371 struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
372 		unsigned int num_segs, unsigned int cycle_state,
373 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
374 {
375 	struct xhci_ring	*ring;
376 	int ret;
377 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
378 
379 	ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
380 	if (!ring)
381 		return NULL;
382 
383 	ring->num_segs = num_segs;
384 	ring->bounce_buf_len = max_packet;
385 	INIT_LIST_HEAD(&ring->td_list);
386 	ring->type = type;
387 	if (num_segs == 0)
388 		return ring;
389 
390 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
391 			&ring->last_seg, num_segs, cycle_state, type,
392 			max_packet, flags);
393 	if (ret)
394 		goto fail;
395 
396 	/* Only event ring does not use link TRB */
397 	if (type != TYPE_EVENT) {
398 		/* See section 4.9.2.1 and 6.4.4.1 */
399 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
400 			cpu_to_le32(LINK_TOGGLE);
401 	}
402 	xhci_initialize_ring_info(ring, cycle_state);
403 	trace_xhci_ring_alloc(ring);
404 	return ring;
405 
406 fail:
407 	kfree(ring);
408 	return NULL;
409 }
410 
411 void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
412 		struct xhci_virt_device *virt_dev,
413 		unsigned int ep_index)
414 {
415 	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
416 	virt_dev->eps[ep_index].ring = NULL;
417 }
418 
419 /*
420  * Expand an existing ring.
421  * Allocate a new ring which has same segment numbers and link the two rings.
422  */
423 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
424 				unsigned int num_trbs, gfp_t flags)
425 {
426 	struct xhci_segment	*first;
427 	struct xhci_segment	*last;
428 	unsigned int		num_segs;
429 	unsigned int		num_segs_needed;
430 	int			ret;
431 
432 	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
433 				(TRBS_PER_SEGMENT - 1);
434 
435 	/* Allocate number of segments we needed, or double the ring size */
436 	num_segs = max(ring->num_segs, num_segs_needed);
437 
438 	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
439 			num_segs, ring->cycle_state, ring->type,
440 			ring->bounce_buf_len, flags);
441 	if (ret)
442 		return -ENOMEM;
443 
444 	if (ring->type == TYPE_STREAM)
445 		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
446 						ring, first, last, flags);
447 	if (ret) {
448 		struct xhci_segment *next;
449 		do {
450 			next = first->next;
451 			xhci_segment_free(xhci, first);
452 			if (first == last)
453 				break;
454 			first = next;
455 		} while (true);
456 		return ret;
457 	}
458 
459 	xhci_link_rings(xhci, ring, first, last, num_segs);
460 	trace_xhci_ring_expansion(ring);
461 	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
462 			"ring expansion succeed, now has %d segments",
463 			ring->num_segs);
464 
465 	return 0;
466 }
467 
468 struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
469 						    int type, gfp_t flags)
470 {
471 	struct xhci_container_ctx *ctx;
472 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
473 
474 	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
475 		return NULL;
476 
477 	ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev));
478 	if (!ctx)
479 		return NULL;
480 
481 	ctx->type = type;
482 	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
483 	if (type == XHCI_CTX_TYPE_INPUT)
484 		ctx->size += CTX_SIZE(xhci->hcc_params);
485 
486 	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
487 	if (!ctx->bytes) {
488 		kfree(ctx);
489 		return NULL;
490 	}
491 	return ctx;
492 }
493 
494 void xhci_free_container_ctx(struct xhci_hcd *xhci,
495 			     struct xhci_container_ctx *ctx)
496 {
497 	if (!ctx)
498 		return;
499 	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
500 	kfree(ctx);
501 }
502 
503 struct xhci_input_control_ctx *xhci_get_input_control_ctx(
504 					      struct xhci_container_ctx *ctx)
505 {
506 	if (ctx->type != XHCI_CTX_TYPE_INPUT)
507 		return NULL;
508 
509 	return (struct xhci_input_control_ctx *)ctx->bytes;
510 }
511 
512 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
513 					struct xhci_container_ctx *ctx)
514 {
515 	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
516 		return (struct xhci_slot_ctx *)ctx->bytes;
517 
518 	return (struct xhci_slot_ctx *)
519 		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
520 }
521 
522 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
523 				    struct xhci_container_ctx *ctx,
524 				    unsigned int ep_index)
525 {
526 	/* increment ep index by offset of start of ep ctx array */
527 	ep_index++;
528 	if (ctx->type == XHCI_CTX_TYPE_INPUT)
529 		ep_index++;
530 
531 	return (struct xhci_ep_ctx *)
532 		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
533 }
534 EXPORT_SYMBOL_GPL(xhci_get_ep_ctx);
535 
536 /***************** Streams structures manipulation *************************/
537 
538 static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
539 		unsigned int num_stream_ctxs,
540 		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
541 {
542 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
543 	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
544 
545 	if (size > MEDIUM_STREAM_ARRAY_SIZE)
546 		dma_free_coherent(dev, size,
547 				stream_ctx, dma);
548 	else if (size <= SMALL_STREAM_ARRAY_SIZE)
549 		return dma_pool_free(xhci->small_streams_pool,
550 				stream_ctx, dma);
551 	else
552 		return dma_pool_free(xhci->medium_streams_pool,
553 				stream_ctx, dma);
554 }
555 
556 /*
557  * The stream context array for each endpoint with bulk streams enabled can
558  * vary in size, based on:
559  *  - how many streams the endpoint supports,
560  *  - the maximum primary stream array size the host controller supports,
561  *  - and how many streams the device driver asks for.
562  *
563  * The stream context array must be a power of 2, and can be as small as
564  * 64 bytes or as large as 1MB.
565  */
566 static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
567 		unsigned int num_stream_ctxs, dma_addr_t *dma,
568 		gfp_t mem_flags)
569 {
570 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
571 	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;
572 
573 	if (size > MEDIUM_STREAM_ARRAY_SIZE)
574 		return dma_alloc_coherent(dev, size,
575 				dma, mem_flags);
576 	else if (size <= SMALL_STREAM_ARRAY_SIZE)
577 		return dma_pool_alloc(xhci->small_streams_pool,
578 				mem_flags, dma);
579 	else
580 		return dma_pool_alloc(xhci->medium_streams_pool,
581 				mem_flags, dma);
582 }
583 
584 struct xhci_ring *xhci_dma_to_transfer_ring(
585 		struct xhci_virt_ep *ep,
586 		u64 address)
587 {
588 	if (ep->ep_state & EP_HAS_STREAMS)
589 		return radix_tree_lookup(&ep->stream_info->trb_address_map,
590 				address >> TRB_SEGMENT_SHIFT);
591 	return ep->ring;
592 }
593 
594 /*
595  * Change an endpoint's internal structure so it supports stream IDs.  The
596  * number of requested streams includes stream 0, which cannot be used by device
597  * drivers.
598  *
599  * The number of stream contexts in the stream context array may be bigger than
600  * the number of streams the driver wants to use.  This is because the number of
601  * stream context array entries must be a power of two.
602  */
603 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
604 		unsigned int num_stream_ctxs,
605 		unsigned int num_streams,
606 		unsigned int max_packet, gfp_t mem_flags)
607 {
608 	struct xhci_stream_info *stream_info;
609 	u32 cur_stream;
610 	struct xhci_ring *cur_ring;
611 	u64 addr;
612 	int ret;
613 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
614 
615 	xhci_dbg(xhci, "Allocating %u streams and %u "
616 			"stream context array entries.\n",
617 			num_streams, num_stream_ctxs);
618 	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
619 		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
620 		return NULL;
621 	}
622 	xhci->cmd_ring_reserved_trbs++;
623 
624 	stream_info = kzalloc_node(sizeof(*stream_info), mem_flags,
625 			dev_to_node(dev));
626 	if (!stream_info)
627 		goto cleanup_trbs;
628 
629 	stream_info->num_streams = num_streams;
630 	stream_info->num_stream_ctxs = num_stream_ctxs;
631 
632 	/* Initialize the array of virtual pointers to stream rings. */
633 	stream_info->stream_rings = kcalloc_node(
634 			num_streams, sizeof(struct xhci_ring *), mem_flags,
635 			dev_to_node(dev));
636 	if (!stream_info->stream_rings)
637 		goto cleanup_info;
638 
639 	/* Initialize the array of DMA addresses for stream rings for the HW. */
640 	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
641 			num_stream_ctxs, &stream_info->ctx_array_dma,
642 			mem_flags);
643 	if (!stream_info->stream_ctx_array)
644 		goto cleanup_ctx;
645 	memset(stream_info->stream_ctx_array, 0,
646 			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
647 
648 	/* Allocate everything needed to free the stream rings later */
649 	stream_info->free_streams_command =
650 		xhci_alloc_command_with_ctx(xhci, true, mem_flags);
651 	if (!stream_info->free_streams_command)
652 		goto cleanup_ctx;
653 
654 	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
655 
656 	/* Allocate rings for all the streams that the driver will use,
657 	 * and add their segment DMA addresses to the radix tree.
658 	 * Stream 0 is reserved.
659 	 */
660 
661 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
662 		stream_info->stream_rings[cur_stream] =
663 			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
664 					mem_flags);
665 		cur_ring = stream_info->stream_rings[cur_stream];
666 		if (!cur_ring)
667 			goto cleanup_rings;
668 		cur_ring->stream_id = cur_stream;
669 		cur_ring->trb_address_map = &stream_info->trb_address_map;
670 		/* Set deq ptr, cycle bit, and stream context type */
671 		addr = cur_ring->first_seg->dma |
672 			SCT_FOR_CTX(SCT_PRI_TR) |
673 			cur_ring->cycle_state;
674 		stream_info->stream_ctx_array[cur_stream].stream_ring =
675 			cpu_to_le64(addr);
676 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
677 				cur_stream, (unsigned long long) addr);
678 
679 		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
680 		if (ret) {
681 			xhci_ring_free(xhci, cur_ring);
682 			stream_info->stream_rings[cur_stream] = NULL;
683 			goto cleanup_rings;
684 		}
685 	}
686 	/* Leave the other unused stream ring pointers in the stream context
687 	 * array initialized to zero.  This will cause the xHC to give us an
688 	 * error if the device asks for a stream ID we don't have setup (if it
689 	 * was any other way, the host controller would assume the ring is
690 	 * "empty" and wait forever for data to be queued to that stream ID).
691 	 */
692 
693 	return stream_info;
694 
695 cleanup_rings:
696 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
697 		cur_ring = stream_info->stream_rings[cur_stream];
698 		if (cur_ring) {
699 			xhci_ring_free(xhci, cur_ring);
700 			stream_info->stream_rings[cur_stream] = NULL;
701 		}
702 	}
703 	xhci_free_command(xhci, stream_info->free_streams_command);
704 cleanup_ctx:
705 	kfree(stream_info->stream_rings);
706 cleanup_info:
707 	kfree(stream_info);
708 cleanup_trbs:
709 	xhci->cmd_ring_reserved_trbs--;
710 	return NULL;
711 }
712 /*
713  * Sets the MaxPStreams field and the Linear Stream Array field.
714  * Sets the dequeue pointer to the stream context array.
715  */
716 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
717 		struct xhci_ep_ctx *ep_ctx,
718 		struct xhci_stream_info *stream_info)
719 {
720 	u32 max_primary_streams;
721 	/* MaxPStreams is the number of stream context array entries, not the
722 	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
723 	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
724 	 */
725 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
726 	xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
727 			"Setting number of stream ctx array entries to %u",
728 			1 << (max_primary_streams + 1));
729 	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
730 	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
731 				       | EP_HAS_LSA);
732 	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
733 }
734 
735 /*
736  * Sets the MaxPStreams field and the Linear Stream Array field to 0.
737  * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
738  * not at the beginning of the ring).
739  */
740 void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
741 		struct xhci_virt_ep *ep)
742 {
743 	dma_addr_t addr;
744 	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
745 	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
746 	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
747 }
748 
749 /* Frees all stream contexts associated with the endpoint,
750  *
751  * Caller should fix the endpoint context streams fields.
752  */
753 void xhci_free_stream_info(struct xhci_hcd *xhci,
754 		struct xhci_stream_info *stream_info)
755 {
756 	int cur_stream;
757 	struct xhci_ring *cur_ring;
758 
759 	if (!stream_info)
760 		return;
761 
762 	for (cur_stream = 1; cur_stream < stream_info->num_streams;
763 			cur_stream++) {
764 		cur_ring = stream_info->stream_rings[cur_stream];
765 		if (cur_ring) {
766 			xhci_ring_free(xhci, cur_ring);
767 			stream_info->stream_rings[cur_stream] = NULL;
768 		}
769 	}
770 	xhci_free_command(xhci, stream_info->free_streams_command);
771 	xhci->cmd_ring_reserved_trbs--;
772 	if (stream_info->stream_ctx_array)
773 		xhci_free_stream_ctx(xhci,
774 				stream_info->num_stream_ctxs,
775 				stream_info->stream_ctx_array,
776 				stream_info->ctx_array_dma);
777 
778 	kfree(stream_info->stream_rings);
779 	kfree(stream_info);
780 }
781 
782 
783 /***************** Device context manipulation *************************/
784 
785 static void xhci_free_tt_info(struct xhci_hcd *xhci,
786 		struct xhci_virt_device *virt_dev,
787 		int slot_id)
788 {
789 	struct list_head *tt_list_head;
790 	struct xhci_tt_bw_info *tt_info, *next;
791 	bool slot_found = false;
792 
793 	/* If the device never made it past the Set Address stage,
794 	 * it may not have the real_port set correctly.
795 	 */
796 	if (virt_dev->real_port == 0 ||
797 			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
798 		xhci_dbg(xhci, "Bad real port.\n");
799 		return;
800 	}
801 
802 	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
803 	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
804 		/* Multi-TT hubs will have more than one entry */
805 		if (tt_info->slot_id == slot_id) {
806 			slot_found = true;
807 			list_del(&tt_info->tt_list);
808 			kfree(tt_info);
809 		} else if (slot_found) {
810 			break;
811 		}
812 	}
813 }
814 
815 int xhci_alloc_tt_info(struct xhci_hcd *xhci,
816 		struct xhci_virt_device *virt_dev,
817 		struct usb_device *hdev,
818 		struct usb_tt *tt, gfp_t mem_flags)
819 {
820 	struct xhci_tt_bw_info		*tt_info;
821 	unsigned int			num_ports;
822 	int				i, j;
823 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
824 
825 	if (!tt->multi)
826 		num_ports = 1;
827 	else
828 		num_ports = hdev->maxchild;
829 
830 	for (i = 0; i < num_ports; i++, tt_info++) {
831 		struct xhci_interval_bw_table *bw_table;
832 
833 		tt_info = kzalloc_node(sizeof(*tt_info), mem_flags,
834 				dev_to_node(dev));
835 		if (!tt_info)
836 			goto free_tts;
837 		INIT_LIST_HEAD(&tt_info->tt_list);
838 		list_add(&tt_info->tt_list,
839 				&xhci->rh_bw[virt_dev->real_port - 1].tts);
840 		tt_info->slot_id = virt_dev->udev->slot_id;
841 		if (tt->multi)
842 			tt_info->ttport = i+1;
843 		bw_table = &tt_info->bw_table;
844 		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
845 			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
846 	}
847 	return 0;
848 
849 free_tts:
850 	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
851 	return -ENOMEM;
852 }
853 
854 
855 /* All the xhci_tds in the ring's TD list should be freed at this point.
856  * Should be called with xhci->lock held if there is any chance the TT lists
857  * will be manipulated by the configure endpoint, allocate device, or update
858  * hub functions while this function is removing the TT entries from the list.
859  */
860 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
861 {
862 	struct xhci_virt_device *dev;
863 	int i;
864 	int old_active_eps = 0;
865 
866 	/* Slot ID 0 is reserved */
867 	if (slot_id == 0 || !xhci->devs[slot_id])
868 		return;
869 
870 	dev = xhci->devs[slot_id];
871 
872 	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
873 	if (!dev)
874 		return;
875 
876 	trace_xhci_free_virt_device(dev);
877 
878 	if (dev->tt_info)
879 		old_active_eps = dev->tt_info->active_eps;
880 
881 	for (i = 0; i < 31; i++) {
882 		if (dev->eps[i].ring)
883 			xhci_ring_free(xhci, dev->eps[i].ring);
884 		if (dev->eps[i].stream_info)
885 			xhci_free_stream_info(xhci,
886 					dev->eps[i].stream_info);
887 		/* Endpoints on the TT/root port lists should have been removed
888 		 * when usb_disable_device() was called for the device.
889 		 * We can't drop them anyway, because the udev might have gone
890 		 * away by this point, and we can't tell what speed it was.
891 		 */
892 		if (!list_empty(&dev->eps[i].bw_endpoint_list))
893 			xhci_warn(xhci, "Slot %u endpoint %u "
894 					"not removed from BW list!\n",
895 					slot_id, i);
896 	}
897 	/* If this is a hub, free the TT(s) from the TT list */
898 	xhci_free_tt_info(xhci, dev, slot_id);
899 	/* If necessary, update the number of active TTs on this root port */
900 	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
901 
902 	if (dev->in_ctx)
903 		xhci_free_container_ctx(xhci, dev->in_ctx);
904 	if (dev->out_ctx)
905 		xhci_free_container_ctx(xhci, dev->out_ctx);
906 
907 	if (dev->udev && dev->udev->slot_id)
908 		dev->udev->slot_id = 0;
909 	kfree(xhci->devs[slot_id]);
910 	xhci->devs[slot_id] = NULL;
911 }
912 
913 /*
914  * Free a virt_device structure.
915  * If the virt_device added a tt_info (a hub) and has children pointing to
916  * that tt_info, then free the child first. Recursive.
917  * We can't rely on udev at this point to find child-parent relationships.
918  */
919 static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
920 {
921 	struct xhci_virt_device *vdev;
922 	struct list_head *tt_list_head;
923 	struct xhci_tt_bw_info *tt_info, *next;
924 	int i;
925 
926 	vdev = xhci->devs[slot_id];
927 	if (!vdev)
928 		return;
929 
930 	if (vdev->real_port == 0 ||
931 			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
932 		xhci_dbg(xhci, "Bad vdev->real_port.\n");
933 		goto out;
934 	}
935 
936 	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
937 	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
938 		/* is this a hub device that added a tt_info to the tts list */
939 		if (tt_info->slot_id == slot_id) {
940 			/* are any devices using this tt_info? */
941 			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
942 				vdev = xhci->devs[i];
943 				if (vdev && (vdev->tt_info == tt_info))
944 					xhci_free_virt_devices_depth_first(
945 						xhci, i);
946 			}
947 		}
948 	}
949 out:
950 	/* we are now at a leaf device */
951 	xhci_debugfs_remove_slot(xhci, slot_id);
952 	xhci_free_virt_device(xhci, slot_id);
953 }
954 
955 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
956 		struct usb_device *udev, gfp_t flags)
957 {
958 	struct xhci_virt_device *dev;
959 	int i;
960 
961 	/* Slot ID 0 is reserved */
962 	if (slot_id == 0 || xhci->devs[slot_id]) {
963 		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
964 		return 0;
965 	}
966 
967 	dev = kzalloc(sizeof(*dev), flags);
968 	if (!dev)
969 		return 0;
970 
971 	dev->slot_id = slot_id;
972 
973 	/* Allocate the (output) device context that will be used in the HC. */
974 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
975 	if (!dev->out_ctx)
976 		goto fail;
977 
978 	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
979 			(unsigned long long)dev->out_ctx->dma);
980 
981 	/* Allocate the (input) device context for address device command */
982 	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
983 	if (!dev->in_ctx)
984 		goto fail;
985 
986 	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
987 			(unsigned long long)dev->in_ctx->dma);
988 
989 	/* Initialize the cancellation and bandwidth list for each ep */
990 	for (i = 0; i < 31; i++) {
991 		dev->eps[i].ep_index = i;
992 		dev->eps[i].vdev = dev;
993 		dev->eps[i].xhci = xhci;
994 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
995 		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
996 	}
997 
998 	/* Allocate endpoint 0 ring */
999 	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
1000 	if (!dev->eps[0].ring)
1001 		goto fail;
1002 
1003 	dev->udev = udev;
1004 
1005 	/* Point to output device context in dcbaa. */
1006 	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
1007 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
1008 		 slot_id,
1009 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
1010 		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
1011 
1012 	trace_xhci_alloc_virt_device(dev);
1013 
1014 	xhci->devs[slot_id] = dev;
1015 
1016 	return 1;
1017 fail:
1018 
1019 	if (dev->in_ctx)
1020 		xhci_free_container_ctx(xhci, dev->in_ctx);
1021 	if (dev->out_ctx)
1022 		xhci_free_container_ctx(xhci, dev->out_ctx);
1023 	kfree(dev);
1024 
1025 	return 0;
1026 }
1027 
1028 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
1029 		struct usb_device *udev)
1030 {
1031 	struct xhci_virt_device *virt_dev;
1032 	struct xhci_ep_ctx	*ep0_ctx;
1033 	struct xhci_ring	*ep_ring;
1034 
1035 	virt_dev = xhci->devs[udev->slot_id];
1036 	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
1037 	ep_ring = virt_dev->eps[0].ring;
1038 	/*
1039 	 * FIXME we don't keep track of the dequeue pointer very well after a
1040 	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
1041 	 * host to our enqueue pointer.  This should only be called after a
1042 	 * configured device has reset, so all control transfers should have
1043 	 * been completed or cancelled before the reset.
1044 	 */
1045 	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
1046 							ep_ring->enqueue)
1047 				   | ep_ring->cycle_state);
1048 }
1049 
1050 /*
1051  * The xHCI roothub may have ports of differing speeds in any order in the port
1052  * status registers.
1053  *
1054  * The xHCI hardware wants to know the roothub port number that the USB device
1055  * is attached to (or the roothub port its ancestor hub is attached to).  All we
1056  * know is the index of that port under either the USB 2.0 or the USB 3.0
1057  * roothub, but that doesn't give us the real index into the HW port status
1058  * registers. Call xhci_find_raw_port_number() to get real index.
1059  */
1060 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
1061 		struct usb_device *udev)
1062 {
1063 	struct usb_device *top_dev;
1064 	struct usb_hcd *hcd;
1065 
1066 	if (udev->speed >= USB_SPEED_SUPER)
1067 		hcd = xhci_get_usb3_hcd(xhci);
1068 	else
1069 		hcd = xhci->main_hcd;
1070 
1071 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1072 			top_dev = top_dev->parent)
1073 		/* Found device below root hub */;
1074 
1075 	return	xhci_find_raw_port_number(hcd, top_dev->portnum);
1076 }
1077 
1078 /* Setup an xHCI virtual device for a Set Address command */
1079 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
1080 {
1081 	struct xhci_virt_device *dev;
1082 	struct xhci_ep_ctx	*ep0_ctx;
1083 	struct xhci_slot_ctx    *slot_ctx;
1084 	u32			port_num;
1085 	u32			max_packets;
1086 	struct usb_device *top_dev;
1087 
1088 	dev = xhci->devs[udev->slot_id];
1089 	/* Slot ID 0 is reserved */
1090 	if (udev->slot_id == 0 || !dev) {
1091 		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
1092 				udev->slot_id);
1093 		return -EINVAL;
1094 	}
1095 	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
1096 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
1097 
1098 	/* 3) Only the control endpoint is valid - one endpoint context */
1099 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
1100 	switch (udev->speed) {
1101 	case USB_SPEED_SUPER_PLUS:
1102 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
1103 		max_packets = MAX_PACKET(512);
1104 		break;
1105 	case USB_SPEED_SUPER:
1106 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
1107 		max_packets = MAX_PACKET(512);
1108 		break;
1109 	case USB_SPEED_HIGH:
1110 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
1111 		max_packets = MAX_PACKET(64);
1112 		break;
1113 	/* USB core guesses at a 64-byte max packet first for FS devices */
1114 	case USB_SPEED_FULL:
1115 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
1116 		max_packets = MAX_PACKET(64);
1117 		break;
1118 	case USB_SPEED_LOW:
1119 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
1120 		max_packets = MAX_PACKET(8);
1121 		break;
1122 	case USB_SPEED_WIRELESS:
1123 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
1124 		return -EINVAL;
1125 	default:
1126 		/* Speed was set earlier, this shouldn't happen. */
1127 		return -EINVAL;
1128 	}
1129 	/* Find the root hub port this device is under */
1130 	port_num = xhci_find_real_port_number(xhci, udev);
1131 	if (!port_num)
1132 		return -EINVAL;
1133 	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
1134 	/* Set the port number in the virtual_device to the faked port number */
1135 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
1136 			top_dev = top_dev->parent)
1137 		/* Found device below root hub */;
1138 	dev->fake_port = top_dev->portnum;
1139 	dev->real_port = port_num;
1140 	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
1141 	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
1142 
1143 	/* Find the right bandwidth table that this device will be a part of.
1144 	 * If this is a full speed device attached directly to a root port (or a
1145 	 * decendent of one), it counts as a primary bandwidth domain, not a
1146 	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
1147 	 * will never be created for the HS root hub.
1148 	 */
1149 	if (!udev->tt || !udev->tt->hub->parent) {
1150 		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
1151 	} else {
1152 		struct xhci_root_port_bw_info *rh_bw;
1153 		struct xhci_tt_bw_info *tt_bw;
1154 
1155 		rh_bw = &xhci->rh_bw[port_num - 1];
1156 		/* Find the right TT. */
1157 		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
1158 			if (tt_bw->slot_id != udev->tt->hub->slot_id)
1159 				continue;
1160 
1161 			if (!dev->udev->tt->multi ||
1162 					(udev->tt->multi &&
1163 					 tt_bw->ttport == dev->udev->ttport)) {
1164 				dev->bw_table = &tt_bw->bw_table;
1165 				dev->tt_info = tt_bw;
1166 				break;
1167 			}
1168 		}
1169 		if (!dev->tt_info)
1170 			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
1171 	}
1172 
1173 	/* Is this a LS/FS device under an external HS hub? */
1174 	if (udev->tt && udev->tt->hub->parent) {
1175 		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
1176 						(udev->ttport << 8));
1177 		if (udev->tt->multi)
1178 			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
1179 	}
1180 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
1181 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
1182 
1183 	/* Step 4 - ring already allocated */
1184 	/* Step 5 */
1185 	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
1186 
1187 	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
1188 	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
1189 					 max_packets);
1190 
1191 	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
1192 				   dev->eps[0].ring->cycle_state);
1193 
1194 	trace_xhci_setup_addressable_virt_device(dev);
1195 
1196 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
1197 
1198 	return 0;
1199 }
1200 
1201 /*
1202  * Convert interval expressed as 2^(bInterval - 1) == interval into
1203  * straight exponent value 2^n == interval.
1204  *
1205  */
1206 static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
1207 		struct usb_host_endpoint *ep)
1208 {
1209 	unsigned int interval;
1210 
1211 	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
1212 	if (interval != ep->desc.bInterval - 1)
1213 		dev_warn(&udev->dev,
1214 			 "ep %#x - rounding interval to %d %sframes\n",
1215 			 ep->desc.bEndpointAddress,
1216 			 1 << interval,
1217 			 udev->speed == USB_SPEED_FULL ? "" : "micro");
1218 
1219 	if (udev->speed == USB_SPEED_FULL) {
1220 		/*
1221 		 * Full speed isoc endpoints specify interval in frames,
1222 		 * not microframes. We are using microframes everywhere,
1223 		 * so adjust accordingly.
1224 		 */
1225 		interval += 3;	/* 1 frame = 2^3 uframes */
1226 	}
1227 
1228 	return interval;
1229 }
1230 
1231 /*
1232  * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
1233  * microframes, rounded down to nearest power of 2.
1234  */
1235 static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
1236 		struct usb_host_endpoint *ep, unsigned int desc_interval,
1237 		unsigned int min_exponent, unsigned int max_exponent)
1238 {
1239 	unsigned int interval;
1240 
1241 	interval = fls(desc_interval) - 1;
1242 	interval = clamp_val(interval, min_exponent, max_exponent);
1243 	if ((1 << interval) != desc_interval)
1244 		dev_dbg(&udev->dev,
1245 			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1246 			 ep->desc.bEndpointAddress,
1247 			 1 << interval,
1248 			 desc_interval);
1249 
1250 	return interval;
1251 }
1252 
1253 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
1254 		struct usb_host_endpoint *ep)
1255 {
1256 	if (ep->desc.bInterval == 0)
1257 		return 0;
1258 	return xhci_microframes_to_exponent(udev, ep,
1259 			ep->desc.bInterval, 0, 15);
1260 }
1261 
1262 
1263 static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1264 		struct usb_host_endpoint *ep)
1265 {
1266 	return xhci_microframes_to_exponent(udev, ep,
1267 			ep->desc.bInterval * 8, 3, 10);
1268 }
1269 
1270 /* Return the polling or NAK interval.
1271  *
1272  * The polling interval is expressed in "microframes".  If xHCI's Interval field
1273  * is set to N, it will service the endpoint every 2^(Interval)*125us.
1274  *
1275  * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
1276  * is set to 0.
1277  */
1278 static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1279 		struct usb_host_endpoint *ep)
1280 {
1281 	unsigned int interval = 0;
1282 
1283 	switch (udev->speed) {
1284 	case USB_SPEED_HIGH:
1285 		/* Max NAK rate */
1286 		if (usb_endpoint_xfer_control(&ep->desc) ||
1287 		    usb_endpoint_xfer_bulk(&ep->desc)) {
1288 			interval = xhci_parse_microframe_interval(udev, ep);
1289 			break;
1290 		}
1291 		fallthrough;	/* SS and HS isoc/int have same decoding */
1292 
1293 	case USB_SPEED_SUPER_PLUS:
1294 	case USB_SPEED_SUPER:
1295 		if (usb_endpoint_xfer_int(&ep->desc) ||
1296 		    usb_endpoint_xfer_isoc(&ep->desc)) {
1297 			interval = xhci_parse_exponent_interval(udev, ep);
1298 		}
1299 		break;
1300 
1301 	case USB_SPEED_FULL:
1302 		if (usb_endpoint_xfer_isoc(&ep->desc)) {
1303 			interval = xhci_parse_exponent_interval(udev, ep);
1304 			break;
1305 		}
1306 		/*
1307 		 * Fall through for interrupt endpoint interval decoding
1308 		 * since it uses the same rules as low speed interrupt
1309 		 * endpoints.
1310 		 */
1311 		fallthrough;
1312 
1313 	case USB_SPEED_LOW:
1314 		if (usb_endpoint_xfer_int(&ep->desc) ||
1315 		    usb_endpoint_xfer_isoc(&ep->desc)) {
1316 
1317 			interval = xhci_parse_frame_interval(udev, ep);
1318 		}
1319 		break;
1320 
1321 	default:
1322 		BUG();
1323 	}
1324 	return interval;
1325 }
1326 
1327 /* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
1328  * High speed endpoint descriptors can define "the number of additional
1329  * transaction opportunities per microframe", but that goes in the Max Burst
1330  * endpoint context field.
1331  */
1332 static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1333 		struct usb_host_endpoint *ep)
1334 {
1335 	if (udev->speed < USB_SPEED_SUPER ||
1336 			!usb_endpoint_xfer_isoc(&ep->desc))
1337 		return 0;
1338 	return ep->ss_ep_comp.bmAttributes;
1339 }
1340 
1341 static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
1342 				       struct usb_host_endpoint *ep)
1343 {
1344 	/* Super speed and Plus have max burst in ep companion desc */
1345 	if (udev->speed >= USB_SPEED_SUPER)
1346 		return ep->ss_ep_comp.bMaxBurst;
1347 
1348 	if (udev->speed == USB_SPEED_HIGH &&
1349 	    (usb_endpoint_xfer_isoc(&ep->desc) ||
1350 	     usb_endpoint_xfer_int(&ep->desc)))
1351 		return usb_endpoint_maxp_mult(&ep->desc) - 1;
1352 
1353 	return 0;
1354 }
1355 
1356 static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
1357 {
1358 	int in;
1359 
1360 	in = usb_endpoint_dir_in(&ep->desc);
1361 
1362 	switch (usb_endpoint_type(&ep->desc)) {
1363 	case USB_ENDPOINT_XFER_CONTROL:
1364 		return CTRL_EP;
1365 	case USB_ENDPOINT_XFER_BULK:
1366 		return in ? BULK_IN_EP : BULK_OUT_EP;
1367 	case USB_ENDPOINT_XFER_ISOC:
1368 		return in ? ISOC_IN_EP : ISOC_OUT_EP;
1369 	case USB_ENDPOINT_XFER_INT:
1370 		return in ? INT_IN_EP : INT_OUT_EP;
1371 	}
1372 	return 0;
1373 }
1374 
1375 /* Return the maximum endpoint service interval time (ESIT) payload.
1376  * Basically, this is the maxpacket size, multiplied by the burst size
1377  * and mult size.
1378  */
1379 static u32 xhci_get_max_esit_payload(struct usb_device *udev,
1380 		struct usb_host_endpoint *ep)
1381 {
1382 	int max_burst;
1383 	int max_packet;
1384 
1385 	/* Only applies for interrupt or isochronous endpoints */
1386 	if (usb_endpoint_xfer_control(&ep->desc) ||
1387 			usb_endpoint_xfer_bulk(&ep->desc))
1388 		return 0;
1389 
1390 	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
1391 	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
1392 	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
1393 		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
1394 	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
1395 	else if (udev->speed >= USB_SPEED_SUPER)
1396 		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1397 
1398 	max_packet = usb_endpoint_maxp(&ep->desc);
1399 	max_burst = usb_endpoint_maxp_mult(&ep->desc);
1400 	/* A 0 in max burst means 1 transfer per ESIT */
1401 	return max_packet * max_burst;
1402 }
1403 
1404 /* Set up an endpoint with one ring segment.  Do not allocate stream rings.
1405  * Drivers will have to call usb_alloc_streams() to do that.
1406  */
1407 int xhci_endpoint_init(struct xhci_hcd *xhci,
1408 		struct xhci_virt_device *virt_dev,
1409 		struct usb_device *udev,
1410 		struct usb_host_endpoint *ep,
1411 		gfp_t mem_flags)
1412 {
1413 	unsigned int ep_index;
1414 	struct xhci_ep_ctx *ep_ctx;
1415 	struct xhci_ring *ep_ring;
1416 	unsigned int max_packet;
1417 	enum xhci_ring_type ring_type;
1418 	u32 max_esit_payload;
1419 	u32 endpoint_type;
1420 	unsigned int max_burst;
1421 	unsigned int interval;
1422 	unsigned int mult;
1423 	unsigned int avg_trb_len;
1424 	unsigned int err_count = 0;
1425 
1426 	ep_index = xhci_get_endpoint_index(&ep->desc);
1427 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1428 
1429 	endpoint_type = xhci_get_endpoint_type(ep);
1430 	if (!endpoint_type)
1431 		return -EINVAL;
1432 
1433 	ring_type = usb_endpoint_type(&ep->desc);
1434 
1435 	/*
1436 	 * Get values to fill the endpoint context, mostly from ep descriptor.
1437 	 * The average TRB buffer lengt for bulk endpoints is unclear as we
1438 	 * have no clue on scatter gather list entry size. For Isoc and Int,
1439 	 * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
1440 	 */
1441 	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1442 	interval = xhci_get_endpoint_interval(udev, ep);
1443 
1444 	/* Periodic endpoint bInterval limit quirk */
1445 	if (usb_endpoint_xfer_int(&ep->desc) ||
1446 	    usb_endpoint_xfer_isoc(&ep->desc)) {
1447 		if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
1448 		    udev->speed >= USB_SPEED_HIGH &&
1449 		    interval >= 7) {
1450 			interval = 6;
1451 		}
1452 	}
1453 
1454 	mult = xhci_get_endpoint_mult(udev, ep);
1455 	max_packet = usb_endpoint_maxp(&ep->desc);
1456 	max_burst = xhci_get_endpoint_max_burst(udev, ep);
1457 	avg_trb_len = max_esit_payload;
1458 
1459 	/* FIXME dig Mult and streams info out of ep companion desc */
1460 
1461 	/* Allow 3 retries for everything but isoc, set CErr = 3 */
1462 	if (!usb_endpoint_xfer_isoc(&ep->desc))
1463 		err_count = 3;
1464 	/* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
1465 	if (usb_endpoint_xfer_bulk(&ep->desc)) {
1466 		if (udev->speed == USB_SPEED_HIGH)
1467 			max_packet = 512;
1468 		if (udev->speed == USB_SPEED_FULL) {
1469 			max_packet = rounddown_pow_of_two(max_packet);
1470 			max_packet = clamp_val(max_packet, 8, 64);
1471 		}
1472 	}
1473 	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
1474 	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
1475 		avg_trb_len = 8;
1476 	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
1477 	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
1478 		mult = 0;
1479 
1480 	/* Set up the endpoint ring */
1481 	virt_dev->eps[ep_index].new_ring =
1482 		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
1483 	if (!virt_dev->eps[ep_index].new_ring)
1484 		return -ENOMEM;
1485 
1486 	virt_dev->eps[ep_index].skip = false;
1487 	ep_ring = virt_dev->eps[ep_index].new_ring;
1488 
1489 	/* Fill the endpoint context */
1490 	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
1491 				      EP_INTERVAL(interval) |
1492 				      EP_MULT(mult));
1493 	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
1494 				       MAX_PACKET(max_packet) |
1495 				       MAX_BURST(max_burst) |
1496 				       ERROR_COUNT(err_count));
1497 	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
1498 				  ep_ring->cycle_state);
1499 
1500 	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1501 				      EP_AVG_TRB_LENGTH(avg_trb_len));
1502 
1503 	return 0;
1504 }
1505 
1506 void xhci_endpoint_zero(struct xhci_hcd *xhci,
1507 		struct xhci_virt_device *virt_dev,
1508 		struct usb_host_endpoint *ep)
1509 {
1510 	unsigned int ep_index;
1511 	struct xhci_ep_ctx *ep_ctx;
1512 
1513 	ep_index = xhci_get_endpoint_index(&ep->desc);
1514 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1515 
1516 	ep_ctx->ep_info = 0;
1517 	ep_ctx->ep_info2 = 0;
1518 	ep_ctx->deq = 0;
1519 	ep_ctx->tx_info = 0;
1520 	/* Don't free the endpoint ring until the set interface or configuration
1521 	 * request succeeds.
1522 	 */
1523 }
1524 
1525 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1526 {
1527 	bw_info->ep_interval = 0;
1528 	bw_info->mult = 0;
1529 	bw_info->num_packets = 0;
1530 	bw_info->max_packet_size = 0;
1531 	bw_info->type = 0;
1532 	bw_info->max_esit_payload = 0;
1533 }
1534 
1535 void xhci_update_bw_info(struct xhci_hcd *xhci,
1536 		struct xhci_container_ctx *in_ctx,
1537 		struct xhci_input_control_ctx *ctrl_ctx,
1538 		struct xhci_virt_device *virt_dev)
1539 {
1540 	struct xhci_bw_info *bw_info;
1541 	struct xhci_ep_ctx *ep_ctx;
1542 	unsigned int ep_type;
1543 	int i;
1544 
1545 	for (i = 1; i < 31; i++) {
1546 		bw_info = &virt_dev->eps[i].bw_info;
1547 
1548 		/* We can't tell what endpoint type is being dropped, but
1549 		 * unconditionally clearing the bandwidth info for non-periodic
1550 		 * endpoints should be harmless because the info will never be
1551 		 * set in the first place.
1552 		 */
1553 		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1554 			/* Dropped endpoint */
1555 			xhci_clear_endpoint_bw_info(bw_info);
1556 			continue;
1557 		}
1558 
1559 		if (EP_IS_ADDED(ctrl_ctx, i)) {
1560 			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1561 			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1562 
1563 			/* Ignore non-periodic endpoints */
1564 			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1565 					ep_type != ISOC_IN_EP &&
1566 					ep_type != INT_IN_EP)
1567 				continue;
1568 
1569 			/* Added or changed endpoint */
1570 			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1571 					le32_to_cpu(ep_ctx->ep_info));
1572 			/* Number of packets and mult are zero-based in the
1573 			 * input context, but we want one-based for the
1574 			 * interval table.
1575 			 */
1576 			bw_info->mult = CTX_TO_EP_MULT(
1577 					le32_to_cpu(ep_ctx->ep_info)) + 1;
1578 			bw_info->num_packets = CTX_TO_MAX_BURST(
1579 					le32_to_cpu(ep_ctx->ep_info2)) + 1;
1580 			bw_info->max_packet_size = MAX_PACKET_DECODED(
1581 					le32_to_cpu(ep_ctx->ep_info2));
1582 			bw_info->type = ep_type;
1583 			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1584 					le32_to_cpu(ep_ctx->tx_info));
1585 		}
1586 	}
1587 }
1588 
1589 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1590  * Useful when you want to change one particular aspect of the endpoint and then
1591  * issue a configure endpoint command.
1592  */
1593 void xhci_endpoint_copy(struct xhci_hcd *xhci,
1594 		struct xhci_container_ctx *in_ctx,
1595 		struct xhci_container_ctx *out_ctx,
1596 		unsigned int ep_index)
1597 {
1598 	struct xhci_ep_ctx *out_ep_ctx;
1599 	struct xhci_ep_ctx *in_ep_ctx;
1600 
1601 	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1602 	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1603 
1604 	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1605 	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1606 	in_ep_ctx->deq = out_ep_ctx->deq;
1607 	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1608 	if (xhci->quirks & XHCI_MTK_HOST) {
1609 		in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1610 		in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1611 	}
1612 }
1613 
1614 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1615  * Useful when you want to change one particular aspect of the endpoint and then
1616  * issue a configure endpoint command.  Only the context entries field matters,
1617  * but we'll copy the whole thing anyway.
1618  */
1619 void xhci_slot_copy(struct xhci_hcd *xhci,
1620 		struct xhci_container_ctx *in_ctx,
1621 		struct xhci_container_ctx *out_ctx)
1622 {
1623 	struct xhci_slot_ctx *in_slot_ctx;
1624 	struct xhci_slot_ctx *out_slot_ctx;
1625 
1626 	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1627 	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1628 
1629 	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1630 	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1631 	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1632 	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1633 }
1634 
1635 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1636 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1637 {
1638 	int i;
1639 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1640 	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1641 
1642 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1643 			"Allocating %d scratchpad buffers", num_sp);
1644 
1645 	if (!num_sp)
1646 		return 0;
1647 
1648 	xhci->scratchpad = kzalloc_node(sizeof(*xhci->scratchpad), flags,
1649 				dev_to_node(dev));
1650 	if (!xhci->scratchpad)
1651 		goto fail_sp;
1652 
1653 	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1654 				     num_sp * sizeof(u64),
1655 				     &xhci->scratchpad->sp_dma, flags);
1656 	if (!xhci->scratchpad->sp_array)
1657 		goto fail_sp2;
1658 
1659 	xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *),
1660 					flags, dev_to_node(dev));
1661 	if (!xhci->scratchpad->sp_buffers)
1662 		goto fail_sp3;
1663 
1664 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1665 	for (i = 0; i < num_sp; i++) {
1666 		dma_addr_t dma;
1667 		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1668 					       flags);
1669 		if (!buf)
1670 			goto fail_sp4;
1671 
1672 		xhci->scratchpad->sp_array[i] = dma;
1673 		xhci->scratchpad->sp_buffers[i] = buf;
1674 	}
1675 
1676 	return 0;
1677 
1678  fail_sp4:
1679 	for (i = i - 1; i >= 0; i--) {
1680 		dma_free_coherent(dev, xhci->page_size,
1681 				    xhci->scratchpad->sp_buffers[i],
1682 				    xhci->scratchpad->sp_array[i]);
1683 	}
1684 
1685 	kfree(xhci->scratchpad->sp_buffers);
1686 
1687  fail_sp3:
1688 	dma_free_coherent(dev, num_sp * sizeof(u64),
1689 			    xhci->scratchpad->sp_array,
1690 			    xhci->scratchpad->sp_dma);
1691 
1692  fail_sp2:
1693 	kfree(xhci->scratchpad);
1694 	xhci->scratchpad = NULL;
1695 
1696  fail_sp:
1697 	return -ENOMEM;
1698 }
1699 
1700 static void scratchpad_free(struct xhci_hcd *xhci)
1701 {
1702 	int num_sp;
1703 	int i;
1704 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1705 
1706 	if (!xhci->scratchpad)
1707 		return;
1708 
1709 	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1710 
1711 	for (i = 0; i < num_sp; i++) {
1712 		dma_free_coherent(dev, xhci->page_size,
1713 				    xhci->scratchpad->sp_buffers[i],
1714 				    xhci->scratchpad->sp_array[i]);
1715 	}
1716 	kfree(xhci->scratchpad->sp_buffers);
1717 	dma_free_coherent(dev, num_sp * sizeof(u64),
1718 			    xhci->scratchpad->sp_array,
1719 			    xhci->scratchpad->sp_dma);
1720 	kfree(xhci->scratchpad);
1721 	xhci->scratchpad = NULL;
1722 }
1723 
1724 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1725 		bool allocate_completion, gfp_t mem_flags)
1726 {
1727 	struct xhci_command *command;
1728 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1729 
1730 	command = kzalloc_node(sizeof(*command), mem_flags, dev_to_node(dev));
1731 	if (!command)
1732 		return NULL;
1733 
1734 	if (allocate_completion) {
1735 		command->completion =
1736 			kzalloc_node(sizeof(struct completion), mem_flags,
1737 				dev_to_node(dev));
1738 		if (!command->completion) {
1739 			kfree(command);
1740 			return NULL;
1741 		}
1742 		init_completion(command->completion);
1743 	}
1744 
1745 	command->status = 0;
1746 	INIT_LIST_HEAD(&command->cmd_list);
1747 	return command;
1748 }
1749 
1750 struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
1751 		bool allocate_completion, gfp_t mem_flags)
1752 {
1753 	struct xhci_command *command;
1754 
1755 	command = xhci_alloc_command(xhci, allocate_completion, mem_flags);
1756 	if (!command)
1757 		return NULL;
1758 
1759 	command->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1760 						   mem_flags);
1761 	if (!command->in_ctx) {
1762 		kfree(command->completion);
1763 		kfree(command);
1764 		return NULL;
1765 	}
1766 	return command;
1767 }
1768 
1769 void xhci_urb_free_priv(struct urb_priv *urb_priv)
1770 {
1771 	kfree(urb_priv);
1772 }
1773 
1774 void xhci_free_command(struct xhci_hcd *xhci,
1775 		struct xhci_command *command)
1776 {
1777 	xhci_free_container_ctx(xhci,
1778 			command->in_ctx);
1779 	kfree(command->completion);
1780 	kfree(command);
1781 }
1782 
1783 int xhci_alloc_erst(struct xhci_hcd *xhci,
1784 		    struct xhci_ring *evt_ring,
1785 		    struct xhci_erst *erst,
1786 		    gfp_t flags)
1787 {
1788 	size_t size;
1789 	unsigned int val;
1790 	struct xhci_segment *seg;
1791 	struct xhci_erst_entry *entry;
1792 
1793 	size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1794 	erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1795 					   size, &erst->erst_dma_addr, flags);
1796 	if (!erst->entries)
1797 		return -ENOMEM;
1798 
1799 	erst->num_entries = evt_ring->num_segs;
1800 
1801 	seg = evt_ring->first_seg;
1802 	for (val = 0; val < evt_ring->num_segs; val++) {
1803 		entry = &erst->entries[val];
1804 		entry->seg_addr = cpu_to_le64(seg->dma);
1805 		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1806 		entry->rsvd = 0;
1807 		seg = seg->next;
1808 	}
1809 
1810 	return 0;
1811 }
1812 
1813 void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
1814 {
1815 	size_t size;
1816 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
1817 
1818 	size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
1819 	if (erst->entries)
1820 		dma_free_coherent(dev, size,
1821 				erst->entries,
1822 				erst->erst_dma_addr);
1823 	erst->entries = NULL;
1824 }
1825 
1826 void xhci_mem_cleanup(struct xhci_hcd *xhci)
1827 {
1828 	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
1829 	int i, j, num_ports;
1830 
1831 	cancel_delayed_work_sync(&xhci->cmd_timer);
1832 
1833 	xhci_free_erst(xhci, &xhci->erst);
1834 
1835 	if (xhci->event_ring)
1836 		xhci_ring_free(xhci, xhci->event_ring);
1837 	xhci->event_ring = NULL;
1838 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1839 
1840 	if (xhci->cmd_ring)
1841 		xhci_ring_free(xhci, xhci->cmd_ring);
1842 	xhci->cmd_ring = NULL;
1843 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1844 	xhci_cleanup_command_queue(xhci);
1845 
1846 	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1847 	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1848 		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1849 		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1850 			struct list_head *ep = &bwt->interval_bw[j].endpoints;
1851 			while (!list_empty(ep))
1852 				list_del_init(ep->next);
1853 		}
1854 	}
1855 
1856 	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1857 		xhci_free_virt_devices_depth_first(xhci, i);
1858 
1859 	dma_pool_destroy(xhci->segment_pool);
1860 	xhci->segment_pool = NULL;
1861 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1862 
1863 	dma_pool_destroy(xhci->device_pool);
1864 	xhci->device_pool = NULL;
1865 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1866 
1867 	dma_pool_destroy(xhci->small_streams_pool);
1868 	xhci->small_streams_pool = NULL;
1869 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1870 			"Freed small stream array pool");
1871 
1872 	dma_pool_destroy(xhci->medium_streams_pool);
1873 	xhci->medium_streams_pool = NULL;
1874 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1875 			"Freed medium stream array pool");
1876 
1877 	if (xhci->dcbaa)
1878 		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1879 				xhci->dcbaa, xhci->dcbaa->dma);
1880 	xhci->dcbaa = NULL;
1881 
1882 	scratchpad_free(xhci);
1883 
1884 	if (!xhci->rh_bw)
1885 		goto no_bw;
1886 
1887 	for (i = 0; i < num_ports; i++) {
1888 		struct xhci_tt_bw_info *tt, *n;
1889 		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1890 			list_del(&tt->tt_list);
1891 			kfree(tt);
1892 		}
1893 	}
1894 
1895 no_bw:
1896 	xhci->cmd_ring_reserved_trbs = 0;
1897 	xhci->usb2_rhub.num_ports = 0;
1898 	xhci->usb3_rhub.num_ports = 0;
1899 	xhci->num_active_eps = 0;
1900 	kfree(xhci->usb2_rhub.ports);
1901 	kfree(xhci->usb3_rhub.ports);
1902 	kfree(xhci->hw_ports);
1903 	kfree(xhci->rh_bw);
1904 	kfree(xhci->ext_caps);
1905 	for (i = 0; i < xhci->num_port_caps; i++)
1906 		kfree(xhci->port_caps[i].psi);
1907 	kfree(xhci->port_caps);
1908 	xhci->num_port_caps = 0;
1909 
1910 	xhci->usb2_rhub.ports = NULL;
1911 	xhci->usb3_rhub.ports = NULL;
1912 	xhci->hw_ports = NULL;
1913 	xhci->rh_bw = NULL;
1914 	xhci->ext_caps = NULL;
1915 	xhci->port_caps = NULL;
1916 
1917 	xhci->page_size = 0;
1918 	xhci->page_shift = 0;
1919 	xhci->usb2_rhub.bus_state.bus_suspended = 0;
1920 	xhci->usb3_rhub.bus_state.bus_suspended = 0;
1921 }
1922 
1923 static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1924 		struct xhci_segment *input_seg,
1925 		union xhci_trb *start_trb,
1926 		union xhci_trb *end_trb,
1927 		dma_addr_t input_dma,
1928 		struct xhci_segment *result_seg,
1929 		char *test_name, int test_number)
1930 {
1931 	unsigned long long start_dma;
1932 	unsigned long long end_dma;
1933 	struct xhci_segment *seg;
1934 
1935 	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1936 	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1937 
1938 	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1939 	if (seg != result_seg) {
1940 		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1941 				test_name, test_number);
1942 		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1943 				"input DMA 0x%llx\n",
1944 				input_seg,
1945 				(unsigned long long) input_dma);
1946 		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1947 				"ending TRB %p (0x%llx DMA)\n",
1948 				start_trb, start_dma,
1949 				end_trb, end_dma);
1950 		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1951 				result_seg, seg);
1952 		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1953 			  true);
1954 		return -1;
1955 	}
1956 	return 0;
1957 }
1958 
1959 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1960 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1961 {
1962 	struct {
1963 		dma_addr_t		input_dma;
1964 		struct xhci_segment	*result_seg;
1965 	} simple_test_vector [] = {
1966 		/* A zeroed DMA field should fail */
1967 		{ 0, NULL },
1968 		/* One TRB before the ring start should fail */
1969 		{ xhci->event_ring->first_seg->dma - 16, NULL },
1970 		/* One byte before the ring start should fail */
1971 		{ xhci->event_ring->first_seg->dma - 1, NULL },
1972 		/* Starting TRB should succeed */
1973 		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1974 		/* Ending TRB should succeed */
1975 		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1976 			xhci->event_ring->first_seg },
1977 		/* One byte after the ring end should fail */
1978 		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1979 		/* One TRB after the ring end should fail */
1980 		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1981 		/* An address of all ones should fail */
1982 		{ (dma_addr_t) (~0), NULL },
1983 	};
1984 	struct {
1985 		struct xhci_segment	*input_seg;
1986 		union xhci_trb		*start_trb;
1987 		union xhci_trb		*end_trb;
1988 		dma_addr_t		input_dma;
1989 		struct xhci_segment	*result_seg;
1990 	} complex_test_vector [] = {
1991 		/* Test feeding a valid DMA address from a different ring */
1992 		{	.input_seg = xhci->event_ring->first_seg,
1993 			.start_trb = xhci->event_ring->first_seg->trbs,
1994 			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1995 			.input_dma = xhci->cmd_ring->first_seg->dma,
1996 			.result_seg = NULL,
1997 		},
1998 		/* Test feeding a valid end TRB from a different ring */
1999 		{	.input_seg = xhci->event_ring->first_seg,
2000 			.start_trb = xhci->event_ring->first_seg->trbs,
2001 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2002 			.input_dma = xhci->cmd_ring->first_seg->dma,
2003 			.result_seg = NULL,
2004 		},
2005 		/* Test feeding a valid start and end TRB from a different ring */
2006 		{	.input_seg = xhci->event_ring->first_seg,
2007 			.start_trb = xhci->cmd_ring->first_seg->trbs,
2008 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2009 			.input_dma = xhci->cmd_ring->first_seg->dma,
2010 			.result_seg = NULL,
2011 		},
2012 		/* TRB in this ring, but after this TD */
2013 		{	.input_seg = xhci->event_ring->first_seg,
2014 			.start_trb = &xhci->event_ring->first_seg->trbs[0],
2015 			.end_trb = &xhci->event_ring->first_seg->trbs[3],
2016 			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
2017 			.result_seg = NULL,
2018 		},
2019 		/* TRB in this ring, but before this TD */
2020 		{	.input_seg = xhci->event_ring->first_seg,
2021 			.start_trb = &xhci->event_ring->first_seg->trbs[3],
2022 			.end_trb = &xhci->event_ring->first_seg->trbs[6],
2023 			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
2024 			.result_seg = NULL,
2025 		},
2026 		/* TRB in this ring, but after this wrapped TD */
2027 		{	.input_seg = xhci->event_ring->first_seg,
2028 			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2029 			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2030 			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
2031 			.result_seg = NULL,
2032 		},
2033 		/* TRB in this ring, but before this wrapped TD */
2034 		{	.input_seg = xhci->event_ring->first_seg,
2035 			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2036 			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2037 			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2038 			.result_seg = NULL,
2039 		},
2040 		/* TRB not in this ring, and we have a wrapped TD */
2041 		{	.input_seg = xhci->event_ring->first_seg,
2042 			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2043 			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2044 			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2045 			.result_seg = NULL,
2046 		},
2047 	};
2048 
2049 	unsigned int num_tests;
2050 	int i, ret;
2051 
2052 	num_tests = ARRAY_SIZE(simple_test_vector);
2053 	for (i = 0; i < num_tests; i++) {
2054 		ret = xhci_test_trb_in_td(xhci,
2055 				xhci->event_ring->first_seg,
2056 				xhci->event_ring->first_seg->trbs,
2057 				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2058 				simple_test_vector[i].input_dma,
2059 				simple_test_vector[i].result_seg,
2060 				"Simple", i);
2061 		if (ret < 0)
2062 			return ret;
2063 	}
2064 
2065 	num_tests = ARRAY_SIZE(complex_test_vector);
2066 	for (i = 0; i < num_tests; i++) {
2067 		ret = xhci_test_trb_in_td(xhci,
2068 				complex_test_vector[i].input_seg,
2069 				complex_test_vector[i].start_trb,
2070 				complex_test_vector[i].end_trb,
2071 				complex_test_vector[i].input_dma,
2072 				complex_test_vector[i].result_seg,
2073 				"Complex", i);
2074 		if (ret < 0)
2075 			return ret;
2076 	}
2077 	xhci_dbg(xhci, "TRB math tests passed.\n");
2078 	return 0;
2079 }
2080 
2081 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2082 {
2083 	u64 temp;
2084 	dma_addr_t deq;
2085 
2086 	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2087 			xhci->event_ring->dequeue);
2088 	if (!deq)
2089 		xhci_warn(xhci, "WARN something wrong with SW event ring "
2090 				"dequeue ptr.\n");
2091 	/* Update HC event ring dequeue pointer */
2092 	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2093 	temp &= ERST_PTR_MASK;
2094 	/* Don't clear the EHB bit (which is RW1C) because
2095 	 * there might be more events to service.
2096 	 */
2097 	temp &= ~ERST_EHB;
2098 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2099 			"// Write event ring dequeue pointer, "
2100 			"preserving EHB bit");
2101 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2102 			&xhci->ir_set->erst_dequeue);
2103 }
2104 
2105 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2106 		__le32 __iomem *addr, int max_caps)
2107 {
2108 	u32 temp, port_offset, port_count;
2109 	int i;
2110 	u8 major_revision, minor_revision;
2111 	struct xhci_hub *rhub;
2112 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2113 	struct xhci_port_cap *port_cap;
2114 
2115 	temp = readl(addr);
2116 	major_revision = XHCI_EXT_PORT_MAJOR(temp);
2117 	minor_revision = XHCI_EXT_PORT_MINOR(temp);
2118 
2119 	if (major_revision == 0x03) {
2120 		rhub = &xhci->usb3_rhub;
2121 		/*
2122 		 * Some hosts incorrectly use sub-minor version for minor
2123 		 * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
2124 		 * for bcdUSB 0x310). Since there is no USB release with sub
2125 		 * minor version 0x301 to 0x309, we can assume that they are
2126 		 * incorrect and fix it here.
2127 		 */
2128 		if (minor_revision > 0x00 && minor_revision < 0x10)
2129 			minor_revision <<= 4;
2130 	} else if (major_revision <= 0x02) {
2131 		rhub = &xhci->usb2_rhub;
2132 	} else {
2133 		xhci_warn(xhci, "Ignoring unknown port speed, "
2134 				"Ext Cap %p, revision = 0x%x\n",
2135 				addr, major_revision);
2136 		/* Ignoring port protocol we can't understand. FIXME */
2137 		return;
2138 	}
2139 	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
2140 
2141 	if (rhub->min_rev < minor_revision)
2142 		rhub->min_rev = minor_revision;
2143 
2144 	/* Port offset and count in the third dword, see section 7.2 */
2145 	temp = readl(addr + 2);
2146 	port_offset = XHCI_EXT_PORT_OFF(temp);
2147 	port_count = XHCI_EXT_PORT_COUNT(temp);
2148 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2149 			"Ext Cap %p, port offset = %u, "
2150 			"count = %u, revision = 0x%x",
2151 			addr, port_offset, port_count, major_revision);
2152 	/* Port count includes the current port offset */
2153 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2154 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
2155 		return;
2156 
2157 	port_cap = &xhci->port_caps[xhci->num_port_caps++];
2158 	if (xhci->num_port_caps > max_caps)
2159 		return;
2160 
2161 	port_cap->maj_rev = major_revision;
2162 	port_cap->min_rev = minor_revision;
2163 	port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
2164 
2165 	if (port_cap->psi_count) {
2166 		port_cap->psi = kcalloc_node(port_cap->psi_count,
2167 					     sizeof(*port_cap->psi),
2168 					     GFP_KERNEL, dev_to_node(dev));
2169 		if (!port_cap->psi)
2170 			port_cap->psi_count = 0;
2171 
2172 		port_cap->psi_uid_count++;
2173 		for (i = 0; i < port_cap->psi_count; i++) {
2174 			port_cap->psi[i] = readl(addr + 4 + i);
2175 
2176 			/* count unique ID values, two consecutive entries can
2177 			 * have the same ID if link is assymetric
2178 			 */
2179 			if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
2180 				  XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
2181 				port_cap->psi_uid_count++;
2182 
2183 			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
2184 				  XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
2185 				  XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
2186 				  XHCI_EXT_PORT_PLT(port_cap->psi[i]),
2187 				  XHCI_EXT_PORT_PFD(port_cap->psi[i]),
2188 				  XHCI_EXT_PORT_LP(port_cap->psi[i]),
2189 				  XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
2190 		}
2191 	}
2192 	/* cache usb2 port capabilities */
2193 	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2194 		xhci->ext_caps[xhci->num_ext_caps++] = temp;
2195 
2196 	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03) &&
2197 		 (temp & XHCI_HLC)) {
2198 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2199 			       "xHCI 1.0: support USB2 hardware lpm");
2200 		xhci->hw_lpm_support = 1;
2201 	}
2202 
2203 	port_offset--;
2204 	for (i = port_offset; i < (port_offset + port_count); i++) {
2205 		struct xhci_port *hw_port = &xhci->hw_ports[i];
2206 		/* Duplicate entry.  Ignore the port if the revisions differ. */
2207 		if (hw_port->rhub) {
2208 			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2209 					" port %u\n", addr, i);
2210 			xhci_warn(xhci, "Port was marked as USB %u, "
2211 					"duplicated as USB %u\n",
2212 					hw_port->rhub->maj_rev, major_revision);
2213 			/* Only adjust the roothub port counts if we haven't
2214 			 * found a similar duplicate.
2215 			 */
2216 			if (hw_port->rhub != rhub &&
2217 				 hw_port->hcd_portnum != DUPLICATE_ENTRY) {
2218 				hw_port->rhub->num_ports--;
2219 				hw_port->hcd_portnum = DUPLICATE_ENTRY;
2220 			}
2221 			continue;
2222 		}
2223 		hw_port->rhub = rhub;
2224 		hw_port->port_cap = port_cap;
2225 		rhub->num_ports++;
2226 	}
2227 	/* FIXME: Should we disable ports not in the Extended Capabilities? */
2228 }
2229 
2230 static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
2231 					struct xhci_hub *rhub, gfp_t flags)
2232 {
2233 	int port_index = 0;
2234 	int i;
2235 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2236 
2237 	if (!rhub->num_ports)
2238 		return;
2239 	rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
2240 			flags, dev_to_node(dev));
2241 	if (!rhub->ports)
2242 		return;
2243 
2244 	for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
2245 		if (xhci->hw_ports[i].rhub != rhub ||
2246 		    xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
2247 			continue;
2248 		xhci->hw_ports[i].hcd_portnum = port_index;
2249 		rhub->ports[port_index] = &xhci->hw_ports[i];
2250 		port_index++;
2251 		if (port_index == rhub->num_ports)
2252 			break;
2253 	}
2254 }
2255 
2256 /*
2257  * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2258  * specify what speeds each port is supposed to be.  We can't count on the port
2259  * speed bits in the PORTSC register being correct until a device is connected,
2260  * but we need to set up the two fake roothubs with the correct number of USB
2261  * 3.0 and USB 2.0 ports at host controller initialization time.
2262  */
2263 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2264 {
2265 	void __iomem *base;
2266 	u32 offset;
2267 	unsigned int num_ports;
2268 	int i, j;
2269 	int cap_count = 0;
2270 	u32 cap_start;
2271 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
2272 
2273 	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2274 	xhci->hw_ports = kcalloc_node(num_ports, sizeof(*xhci->hw_ports),
2275 				flags, dev_to_node(dev));
2276 	if (!xhci->hw_ports)
2277 		return -ENOMEM;
2278 
2279 	for (i = 0; i < num_ports; i++) {
2280 		xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
2281 			NUM_PORT_REGS * i;
2282 		xhci->hw_ports[i].hw_portnum = i;
2283 	}
2284 
2285 	xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
2286 				   dev_to_node(dev));
2287 	if (!xhci->rh_bw)
2288 		return -ENOMEM;
2289 	for (i = 0; i < num_ports; i++) {
2290 		struct xhci_interval_bw_table *bw_table;
2291 
2292 		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2293 		bw_table = &xhci->rh_bw[i].bw_table;
2294 		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2295 			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2296 	}
2297 	base = &xhci->cap_regs->hc_capbase;
2298 
2299 	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
2300 	if (!cap_start) {
2301 		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
2302 		return -ENODEV;
2303 	}
2304 
2305 	offset = cap_start;
2306 	/* count extended protocol capability entries for later caching */
2307 	while (offset) {
2308 		cap_count++;
2309 		offset = xhci_find_next_ext_cap(base, offset,
2310 						      XHCI_EXT_CAPS_PROTOCOL);
2311 	}
2312 
2313 	xhci->ext_caps = kcalloc_node(cap_count, sizeof(*xhci->ext_caps),
2314 				flags, dev_to_node(dev));
2315 	if (!xhci->ext_caps)
2316 		return -ENOMEM;
2317 
2318 	xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
2319 				flags, dev_to_node(dev));
2320 	if (!xhci->port_caps)
2321 		return -ENOMEM;
2322 
2323 	offset = cap_start;
2324 
2325 	while (offset) {
2326 		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
2327 		if (xhci->usb2_rhub.num_ports + xhci->usb3_rhub.num_ports ==
2328 		    num_ports)
2329 			break;
2330 		offset = xhci_find_next_ext_cap(base, offset,
2331 						XHCI_EXT_CAPS_PROTOCOL);
2332 	}
2333 	if (xhci->usb2_rhub.num_ports == 0 && xhci->usb3_rhub.num_ports == 0) {
2334 		xhci_warn(xhci, "No ports on the roothubs?\n");
2335 		return -ENODEV;
2336 	}
2337 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2338 		       "Found %u USB 2.0 ports and %u USB 3.0 ports.",
2339 		       xhci->usb2_rhub.num_ports, xhci->usb3_rhub.num_ports);
2340 
2341 	/* Place limits on the number of roothub ports so that the hub
2342 	 * descriptors aren't longer than the USB core will allocate.
2343 	 */
2344 	if (xhci->usb3_rhub.num_ports > USB_SS_MAXPORTS) {
2345 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2346 				"Limiting USB 3.0 roothub ports to %u.",
2347 				USB_SS_MAXPORTS);
2348 		xhci->usb3_rhub.num_ports = USB_SS_MAXPORTS;
2349 	}
2350 	if (xhci->usb2_rhub.num_ports > USB_MAXCHILDREN) {
2351 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2352 				"Limiting USB 2.0 roothub ports to %u.",
2353 				USB_MAXCHILDREN);
2354 		xhci->usb2_rhub.num_ports = USB_MAXCHILDREN;
2355 	}
2356 
2357 	if (!xhci->usb2_rhub.num_ports)
2358 		xhci_info(xhci, "USB2 root hub has no ports\n");
2359 
2360 	if (!xhci->usb3_rhub.num_ports)
2361 		xhci_info(xhci, "USB3 root hub has no ports\n");
2362 
2363 	xhci_create_rhub_port_array(xhci, &xhci->usb2_rhub, flags);
2364 	xhci_create_rhub_port_array(xhci, &xhci->usb3_rhub, flags);
2365 
2366 	return 0;
2367 }
2368 
2369 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2370 {
2371 	dma_addr_t	dma;
2372 	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
2373 	unsigned int	val, val2;
2374 	u64		val_64;
2375 	u32		page_size, temp;
2376 	int		i, ret;
2377 
2378 	INIT_LIST_HEAD(&xhci->cmd_list);
2379 
2380 	/* init command timeout work */
2381 	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2382 	init_completion(&xhci->cmd_ring_stop_completion);
2383 
2384 	page_size = readl(&xhci->op_regs->page_size);
2385 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2386 			"Supported page size register = 0x%x", page_size);
2387 	i = ffs(page_size);
2388 	if (i < 16)
2389 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2390 			"Supported page size of %iK", (1 << (i+12)) / 1024);
2391 	else
2392 		xhci_warn(xhci, "WARN: no supported page size\n");
2393 	/* Use 4K pages, since that's common and the minimum the HC supports */
2394 	xhci->page_shift = 12;
2395 	xhci->page_size = 1 << xhci->page_shift;
2396 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2397 			"HCD page size set to %iK", xhci->page_size / 1024);
2398 
2399 	/*
2400 	 * Program the Number of Device Slots Enabled field in the CONFIG
2401 	 * register with the max value of slots the HC can handle.
2402 	 */
2403 	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2404 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2405 			"// xHC can handle at most %d device slots.", val);
2406 	val2 = readl(&xhci->op_regs->config_reg);
2407 	val |= (val2 & ~HCS_SLOTS_MASK);
2408 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2409 			"// Setting Max device slots reg = 0x%x.", val);
2410 	writel(val, &xhci->op_regs->config_reg);
2411 
2412 	/*
2413 	 * xHCI section 5.4.6 - Device Context array must be
2414 	 * "physically contiguous and 64-byte (cache line) aligned".
2415 	 */
2416 	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2417 			flags);
2418 	if (!xhci->dcbaa)
2419 		goto fail;
2420 	xhci->dcbaa->dma = dma;
2421 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2422 			"// Device context base array address = 0x%llx (DMA), %p (virt)",
2423 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2424 	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2425 
2426 	/*
2427 	 * Initialize the ring segment pool.  The ring must be a contiguous
2428 	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
2429 	 * however, the command ring segment needs 64-byte aligned segments
2430 	 * and our use of dma addresses in the trb_address_map radix tree needs
2431 	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2432 	 */
2433 	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2434 			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2435 
2436 	/* See Table 46 and Note on Figure 55 */
2437 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2438 			2112, 64, xhci->page_size);
2439 	if (!xhci->segment_pool || !xhci->device_pool)
2440 		goto fail;
2441 
2442 	/* Linear stream context arrays don't have any boundary restrictions,
2443 	 * and only need to be 16-byte aligned.
2444 	 */
2445 	xhci->small_streams_pool =
2446 		dma_pool_create("xHCI 256 byte stream ctx arrays",
2447 			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2448 	xhci->medium_streams_pool =
2449 		dma_pool_create("xHCI 1KB stream ctx arrays",
2450 			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2451 	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2452 	 * will be allocated with dma_alloc_coherent()
2453 	 */
2454 
2455 	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2456 		goto fail;
2457 
2458 	/* Set up the command ring to have one segments for now. */
2459 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
2460 	if (!xhci->cmd_ring)
2461 		goto fail;
2462 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2463 			"Allocated command ring at %p", xhci->cmd_ring);
2464 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2465 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
2466 
2467 	/* Set the address in the Command Ring Control register */
2468 	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2469 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2470 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2471 		xhci->cmd_ring->cycle_state;
2472 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2473 			"// Setting command ring address to 0x%016llx", val_64);
2474 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2475 
2476 	/* Reserve one command ring TRB for disabling LPM.
2477 	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2478 	 * disabling LPM, we only need to reserve one TRB for all devices.
2479 	 */
2480 	xhci->cmd_ring_reserved_trbs++;
2481 
2482 	val = readl(&xhci->cap_regs->db_off);
2483 	val &= DBOFF_MASK;
2484 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2485 			"// Doorbell array is located at offset 0x%x"
2486 			" from cap regs base addr", val);
2487 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
2488 	/* Set ir_set to interrupt register set 0 */
2489 	xhci->ir_set = &xhci->run_regs->ir_set[0];
2490 
2491 	/*
2492 	 * Event ring setup: Allocate a normal ring, but also setup
2493 	 * the event ring segment table (ERST).  Section 4.9.3.
2494 	 */
2495 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2496 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2497 					0, flags);
2498 	if (!xhci->event_ring)
2499 		goto fail;
2500 	if (xhci_check_trb_in_td_math(xhci) < 0)
2501 		goto fail;
2502 
2503 	ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
2504 	if (ret)
2505 		goto fail;
2506 
2507 	/* set ERST count with the number of entries in the segment table */
2508 	val = readl(&xhci->ir_set->erst_size);
2509 	val &= ERST_SIZE_MASK;
2510 	val |= ERST_NUM_SEGS;
2511 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2512 			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
2513 			val);
2514 	writel(val, &xhci->ir_set->erst_size);
2515 
2516 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2517 			"// Set ERST entries to point to event ring.");
2518 	/* set the segment table base address */
2519 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2520 			"// Set ERST base address for ir_set 0 = 0x%llx",
2521 			(unsigned long long)xhci->erst.erst_dma_addr);
2522 	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2523 	val_64 &= ERST_PTR_MASK;
2524 	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2525 	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2526 
2527 	/* Set the event ring dequeue address */
2528 	xhci_set_hc_event_deq(xhci);
2529 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2530 			"Wrote ERST address to ir_set 0.");
2531 
2532 	xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
2533 
2534 	/*
2535 	 * XXX: Might need to set the Interrupter Moderation Register to
2536 	 * something other than the default (~1ms minimum between interrupts).
2537 	 * See section 5.5.1.2.
2538 	 */
2539 	for (i = 0; i < MAX_HC_SLOTS; i++)
2540 		xhci->devs[i] = NULL;
2541 	for (i = 0; i < USB_MAXCHILDREN; i++) {
2542 		xhci->usb2_rhub.bus_state.resume_done[i] = 0;
2543 		xhci->usb3_rhub.bus_state.resume_done[i] = 0;
2544 		/* Only the USB 2.0 completions will ever be used. */
2545 		init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
2546 		init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
2547 	}
2548 
2549 	if (scratchpad_alloc(xhci, flags))
2550 		goto fail;
2551 	if (xhci_setup_port_arrays(xhci, flags))
2552 		goto fail;
2553 
2554 	/* Enable USB 3.0 device notifications for function remote wake, which
2555 	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2556 	 * U3 (device suspend).
2557 	 */
2558 	temp = readl(&xhci->op_regs->dev_notification);
2559 	temp &= ~DEV_NOTE_MASK;
2560 	temp |= DEV_NOTE_FWAKE;
2561 	writel(temp, &xhci->op_regs->dev_notification);
2562 
2563 	return 0;
2564 
2565 fail:
2566 	xhci_halt(xhci);
2567 	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
2568 	xhci_mem_cleanup(xhci);
2569 	return -ENOMEM;
2570 }
2571