/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					unsigned int cycle_state, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t	dma;
	int		i;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
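
/*
 * Illustrative sketch (not part of the driver): per the comment above, the
 * caller owns the remaining Link TRB flags.  A caller that wants the cycle
 * bit to toggle at the wrap point sets Toggle Cycle itself after linking,
 * which is exactly what xhci_ring_alloc() does further down:
 *
 *	xhci_link_segments(xhci, last, first, type);
 *	last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
 *		cpu_to_le32(LINK_TOGGLE);
 */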

/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}

/*
 * We need a radix tree to map the DMA address of a TRB to the stream ID it
 * belongs to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since
 * the endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say
 * I have segments of size 1KB, that are always 1KB aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913ff (its last TRB at 0x10c913f0).
 * If I shift the address right by 10 bits, the key for the stream ID is
 * 0x43244.  I can use the DMA address of the TRB to pass the radix tree a
 * key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as its key.  On 32-bit systems, an
 * unsigned long will be 32 bits; on a 64-bit system, an unsigned long will
 * be 64 bits.  Since we only request 32-bit DMA addresses, we can use that
 * as the key on both 32-bit and 64-bit systems (it would also be fine if we
 * asked for 64-bit PCI DMA addresses on a 64-bit system).  There might be a
 * problem on 32-bit extended systems (where the DMA address can be bigger
 * than 32 bits) if we allow the PCI DMA mask to be bigger than 32 bits.  So
 * don't do that.
 */
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *seg,
		gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map, key, ring);
	radix_tree_preload_end();
	return ret;
}
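
/*
 * Worked example (illustrative only), reusing the 1KB-aligned segment from
 * the comment above and assuming TRB_SEGMENT_SHIFT == 10:
 *
 *	dma_addr_t trb_dma = 0x10c912c0;
 *	unsigned long key = (unsigned long)(trb_dma >> TRB_SEGMENT_SHIFT);
 *
 * key == 0x43244, the same key under which the segment starting at
 * 0x10c91000 was inserted, so the lookup returns that segment's ring.
 */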

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}

static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to hand over ownership of the TRB, so PCS = 1.  The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and we leave an extra TRB for SW
	 * accounting purposes
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
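
/*
 * Example of the accounting above (illustrative; assumes
 * TRBS_PER_SEGMENT == 64): a freshly initialized 2-segment ring has
 *
 *	num_trbs_free = 2 * (64 - 1) - 1 = 125
 *
 * one TRB per segment is the link TRB, and one more TRB is held back for
 * software accounting (e.g. so a full ring is never mistaken for an empty
 * one).
 */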

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring	*ring;
	int ret;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;

	/* Only the event ring does not use link TRBs */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;

fail:
	kfree(ring);
	return NULL;
}

void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
	struct xhci_segment	*seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |=
					cpu_to_le32(TRB_CYCLE);
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

/*
 * Expand an existing ring.
 * Allocate a new set of segments (as many as the ring already has, or enough
 * to hold num_trbs more TRBs, whichever is larger) and link them into the
 * ring.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment	*first;
	struct xhci_segment	*last;
	unsigned int		num_segs;
	unsigned int		num_segs_needed;
	int			ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate the number of segments we need, or double the ring size,
	 * whichever is larger.
	 */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;
		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeeded, now has %d segments",
			ring->num_segs);

	return 0;
}
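
/*
 * Sizing example (illustrative; assumes TRBS_PER_SEGMENT == 64, i.e. 63
 * enqueueable TRBs per segment once the link TRB is accounted for):
 *
 *	num_trbs = 100
 *	num_segs_needed = (100 + 63 - 1) / 63 = 2
 *
 * Two new segments provide 2 * 63 = 126 free TRBs >= 100, and because
 * num_segs is at least ring->num_segs, the ring at least doubles in size.
 */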

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
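
/*
 * Sizing example (illustrative): on a host with 64-byte contexts
 * (HCC_64BYTE_CONTEXT set), a device context is 2048 bytes and an input
 * context is 2048 + CTX_SIZE = 2048 + 64 = 2112 bytes, the extra CTX_SIZE
 * bytes holding the input control context that precedes the slot and
 * endpoint contexts.
 */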

static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
					      struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
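
/*
 * Index example (illustrative): for endpoint index 0 (the default control
 * endpoint) in an input context, the code above computes
 *
 *	ep_index = 0 + 1 + 1 = 2
 *
 * because the endpoint contexts sit behind the input control context and
 * the slot context, each CTX_SIZE bytes long; an output (device) context
 * has no input control context, so only one increment is needed.
 */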


/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size,
				stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}

/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size,
				dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}
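
/*
 * Pool selection sketch (illustrative; assumes the xhci.h values
 * SMALL_STREAM_ARRAY_SIZE == 256 and MEDIUM_STREAM_ARRAY_SIZE == 1024, and
 * 16-byte stream contexts):
 *
 *	 16 contexts ->  256 bytes -> small_streams_pool
 *	 64 contexts -> 1024 bytes -> medium_streams_pool
 *	256 contexts -> 4096 bytes -> dma_alloc_coherent()
 */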

struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}

/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 */
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we haven't set up (if it
	 * were any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}

/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPStreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
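
/*
 * Worked example (illustrative): for num_stream_ctxs == 256,
 *
 *	max_primary_streams = fls(256) - 2 = 9 - 2 = 7
 *
 * and the xHC decodes MaxPStreams as 2^(7 + 1) = 256 stream context array
 * entries, matching the allocation.
 */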

/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;

	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}

/* Frees all stream contexts associated with the endpoint.
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}


/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    (unsigned long)ep);
	ep->xhci = xhci;
}

static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}

int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info		*tt_info;
	unsigned int			num_ports;
	int				i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i + 1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}


/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get the real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed == USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx    *slot_ctx;
	u32			port_num;
	u32			max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
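
/*
 * Worked example (illustrative): a full-speed isoc endpoint with
 * bInterval == 4 is serviced every 2^(4 - 1) = 8 frames.  The code above
 * yields interval = (4 - 1) + 3 = 6, i.e. 2^6 = 64 microframes, which is
 * the same 8 ms period expressed in 125 us units.
 */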

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}
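
/*
 * Rounding example (illustrative): a high-speed bulk endpoint advertising
 * a NAK rate of bInterval == 9 microframes gets
 *
 *	interval = fls(9) - 1 = 3, i.e. 2^3 = 8 microframes
 *
 * and the warning above reports the round-down from 9 to 8.
 */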

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}

static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		type = 0;
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
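
/*
 * Worked example (illustrative): a high-speed interrupt endpoint whose
 * wMaxPacketSize encodes a 1024-byte max packet with 2 additional
 * transaction opportunities per microframe gives
 *
 *	max_esit_payload = 1024 * (2 + 1) = 3072 bytes
 *
 * per endpoint service interval.
 */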
1395 
1396 /* Set up an endpoint with one ring segment.  Do not allocate stream rings.
1397  * Drivers will have to call usb_alloc_streams() to do that.
1398  */
1399 int xhci_endpoint_init(struct xhci_hcd *xhci,
1400 		struct xhci_virt_device *virt_dev,
1401 		struct usb_device *udev,
1402 		struct usb_host_endpoint *ep,
1403 		gfp_t mem_flags)
1404 {
1405 	unsigned int ep_index;
1406 	struct xhci_ep_ctx *ep_ctx;
1407 	struct xhci_ring *ep_ring;
1408 	unsigned int max_packet;
1409 	unsigned int max_burst;
1410 	enum xhci_ring_type type;
1411 	u32 max_esit_payload;
1412 	u32 endpoint_type;
1413 
1414 	ep_index = xhci_get_endpoint_index(&ep->desc);
1415 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1416 
1417 	endpoint_type = xhci_get_endpoint_type(ep);
1418 	if (!endpoint_type)
1419 		return -EINVAL;
1420 	ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
1421 
1422 	type = usb_endpoint_type(&ep->desc);
1423 	/* Set up the endpoint ring */
1424 	virt_dev->eps[ep_index].new_ring =
1425 		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
1426 	if (!virt_dev->eps[ep_index].new_ring) {
1427 		/* Attempt to use the ring cache */
1428 		if (virt_dev->num_rings_cached == 0)
1429 			return -ENOMEM;
1430 		virt_dev->eps[ep_index].new_ring =
1431 			virt_dev->ring_cache[virt_dev->num_rings_cached];
1432 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1433 		virt_dev->num_rings_cached--;
1434 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1435 					1, type);
1436 	}
1437 	virt_dev->eps[ep_index].skip = false;
1438 	ep_ring = virt_dev->eps[ep_index].new_ring;
1439 	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
1440 
1441 	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
1442 				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
1443 
1444 	/* FIXME dig Mult and streams info out of ep companion desc */
1445 
1446 	/* Allow 3 retries for everything but isoc;
1447 	 * CErr shall be set to 0 for Isoch endpoints.
1448 	 */
1449 	if (!usb_endpoint_xfer_isoc(&ep->desc))
1450 		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
1451 	else
1452 		ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
1453 
1454 	/* Set the max packet size and max burst */
1455 	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1456 	max_burst = 0;
1457 	switch (udev->speed) {
1458 	case USB_SPEED_SUPER:
1459 		/* dig out max burst from ep companion desc */
1460 		max_burst = ep->ss_ep_comp.bMaxBurst;
1461 		break;
1462 	case USB_SPEED_HIGH:
1463 		/* Some devices get this wrong */
1464 		if (usb_endpoint_xfer_bulk(&ep->desc))
1465 			max_packet = 512;
1466 		/* bits 11:12 specify the number of additional transaction
1467 		 * opportunities per microframe (USB 2.0, section 9.6.6)
1468 		 */
1469 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
1470 				usb_endpoint_xfer_int(&ep->desc)) {
1471 			max_burst = (usb_endpoint_maxp(&ep->desc)
1472 				     & 0x1800) >> 11;
1473 		}
1474 		break;
1475 	case USB_SPEED_FULL:
1476 	case USB_SPEED_LOW:
1477 		break;
1478 	default:
1479 		BUG();
1480 	}
1481 	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
1482 			MAX_BURST(max_burst));
1483 	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
1484 	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
1485 
1486 	/*
1487 	 * XXX no idea how to calculate the average TRB buffer length for bulk
1488 	 * endpoints, as the driver gives us no clue how big each scatter gather
1489 	 * list entry (or buffer) is going to be.
1490 	 *
1491 	 * For isochronous and interrupt endpoints, we set it to the max
1492 	 * available, until we have new API in the USB core to allow drivers to
1493 	 * declare how much bandwidth they actually need.
1494 	 *
1495 	 * Normally, it would be calculated by taking the total of the buffer
1496 	 * lengths in the TD and then dividing by the number of TRBs in a TD,
1497 	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
1498 	 * use Event Data TRBs, and we don't chain in a link TRB on short
1499 	 * transfers, we're basically dividing by 1.
1500 	 *
1501 	 * xHCI 1.0 specification indicates that the Average TRB Length should
1502 	 * be set to 8 for control endpoints.
1503 	 */
1504 	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
1505 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
1506 	else
1507 		ep_ctx->tx_info |=
1508 			 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
1509 
1510 	/* FIXME Debug endpoint context */
1511 	return 0;
1512 }
1513 
1514 void xhci_endpoint_zero(struct xhci_hcd *xhci,
1515 		struct xhci_virt_device *virt_dev,
1516 		struct usb_host_endpoint *ep)
1517 {
1518 	unsigned int ep_index;
1519 	struct xhci_ep_ctx *ep_ctx;
1520 
1521 	ep_index = xhci_get_endpoint_index(&ep->desc);
1522 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
1523 
1524 	ep_ctx->ep_info = 0;
1525 	ep_ctx->ep_info2 = 0;
1526 	ep_ctx->deq = 0;
1527 	ep_ctx->tx_info = 0;
1528 	/* Don't free the endpoint ring until the set interface or configuration
1529 	 * request succeeds.
1530 	 */
1531 }
1532 
1533 void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
1534 {
1535 	bw_info->ep_interval = 0;
1536 	bw_info->mult = 0;
1537 	bw_info->num_packets = 0;
1538 	bw_info->max_packet_size = 0;
1539 	bw_info->type = 0;
1540 	bw_info->max_esit_payload = 0;
1541 }
1542 
1543 void xhci_update_bw_info(struct xhci_hcd *xhci,
1544 		struct xhci_container_ctx *in_ctx,
1545 		struct xhci_input_control_ctx *ctrl_ctx,
1546 		struct xhci_virt_device *virt_dev)
1547 {
1548 	struct xhci_bw_info *bw_info;
1549 	struct xhci_ep_ctx *ep_ctx;
1550 	unsigned int ep_type;
1551 	int i;
1552 
1553 	for (i = 1; i < 31; ++i) {
1554 		bw_info = &virt_dev->eps[i].bw_info;
1555 
1556 		/* We can't tell what endpoint type is being dropped, but
1557 		 * unconditionally clearing the bandwidth info for non-periodic
1558 		 * endpoints should be harmless because the info will never be
1559 		 * set in the first place.
1560 		 */
1561 		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
1562 			/* Dropped endpoint */
1563 			xhci_clear_endpoint_bw_info(bw_info);
1564 			continue;
1565 		}
1566 
1567 		if (EP_IS_ADDED(ctrl_ctx, i)) {
1568 			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
1569 			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
1570 
1571 			/* Ignore non-periodic endpoints */
1572 			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
1573 					ep_type != ISOC_IN_EP &&
1574 					ep_type != INT_IN_EP)
1575 				continue;
1576 
1577 			/* Added or changed endpoint */
1578 			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
1579 					le32_to_cpu(ep_ctx->ep_info));
1580 			/* Number of packets and mult are zero-based in the
1581 			 * input context, but we want one-based for the
1582 			 * interval table.
1583 			 */
1584 			bw_info->mult = CTX_TO_EP_MULT(
1585 					le32_to_cpu(ep_ctx->ep_info)) + 1;
1586 			bw_info->num_packets = CTX_TO_MAX_BURST(
1587 					le32_to_cpu(ep_ctx->ep_info2)) + 1;
1588 			bw_info->max_packet_size = MAX_PACKET_DECODED(
1589 					le32_to_cpu(ep_ctx->ep_info2));
1590 			bw_info->type = ep_type;
1591 			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
1592 					le32_to_cpu(ep_ctx->tx_info));
1593 		}
1594 	}
1595 }
1596 
1597 /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
1598  * Useful when you want to change one particular aspect of the endpoint and then
1599  * issue a configure endpoint command.
1600  */
1601 void xhci_endpoint_copy(struct xhci_hcd *xhci,
1602 		struct xhci_container_ctx *in_ctx,
1603 		struct xhci_container_ctx *out_ctx,
1604 		unsigned int ep_index)
1605 {
1606 	struct xhci_ep_ctx *out_ep_ctx;
1607 	struct xhci_ep_ctx *in_ep_ctx;
1608 
1609 	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1610 	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1611 
1612 	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
1613 	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1614 	in_ep_ctx->deq = out_ep_ctx->deq;
1615 	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1616 }
1617 
1618 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
1619  * Useful when you want to change one particular aspect of the endpoint and then
1620  * issue a configure endpoint command.  Only the context entries field matters,
1621  * but we'll copy the whole thing anyway.
1622  */
1623 void xhci_slot_copy(struct xhci_hcd *xhci,
1624 		struct xhci_container_ctx *in_ctx,
1625 		struct xhci_container_ctx *out_ctx)
1626 {
1627 	struct xhci_slot_ctx *in_slot_ctx;
1628 	struct xhci_slot_ctx *out_slot_ctx;
1629 
1630 	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1631 	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
1632 
1633 	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
1634 	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
1635 	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
1636 	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
1637 }
1638 
1639 /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
1640 static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1641 {
1642 	int i;
1643 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
1644 	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1645 
1646 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1647 			"Allocating %d scratchpad buffers", num_sp);
1648 
1649 	if (!num_sp)
1650 		return 0;
1651 
1652 	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
1653 	if (!xhci->scratchpad)
1654 		goto fail_sp;
1655 
1656 	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
1657 				     num_sp * sizeof(u64),
1658 				     &xhci->scratchpad->sp_dma, flags);
1659 	if (!xhci->scratchpad->sp_array)
1660 		goto fail_sp2;
1661 
1662 	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
1663 	if (!xhci->scratchpad->sp_buffers)
1664 		goto fail_sp3;
1665 
1666 	xhci->scratchpad->sp_dma_buffers =
1667 		kzalloc(sizeof(dma_addr_t) * num_sp, flags);
1668 
1669 	if (!xhci->scratchpad->sp_dma_buffers)
1670 		goto fail_sp4;
1671 
1672 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1673 	for (i = 0; i < num_sp; i++) {
1674 		dma_addr_t dma;
1675 		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1676 				flags);
1677 		if (!buf)
1678 			goto fail_sp5;
1679 
1680 		xhci->scratchpad->sp_array[i] = dma;
1681 		xhci->scratchpad->sp_buffers[i] = buf;
1682 		xhci->scratchpad->sp_dma_buffers[i] = dma;
1683 	}
1684 
1685 	return 0;
1686 
1687  fail_sp5:
1688 	for (i = i - 1; i >= 0; i--) {
1689 		dma_free_coherent(dev, xhci->page_size,
1690 				    xhci->scratchpad->sp_buffers[i],
1691 				    xhci->scratchpad->sp_dma_buffers[i]);
1692 	}
1693 	kfree(xhci->scratchpad->sp_dma_buffers);
1694 
1695  fail_sp4:
1696 	kfree(xhci->scratchpad->sp_buffers);
1697 
1698  fail_sp3:
1699 	dma_free_coherent(dev, num_sp * sizeof(u64),
1700 			    xhci->scratchpad->sp_array,
1701 			    xhci->scratchpad->sp_dma);
1702 
1703  fail_sp2:
1704 	kfree(xhci->scratchpad);
1705 	xhci->scratchpad = NULL;
1706 
1707  fail_sp:
1708 	return -ENOMEM;
1709 }
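
/*
 * Scratchpad layout sketch (illustrative numbers): with num_sp == 4 and
 * a 4K page size, the code above allocates a 4 * sizeof(u64) = 32 byte
 * sp_array whose entries hold the DMA addresses of four separate 4K
 * buffers, and points dev_context_ptrs[0] in the DCBAA at sp_array.
 * The buffers are for the controller's private use; the driver never
 * reads or writes their contents.
 */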
1710 
1711 static void scratchpad_free(struct xhci_hcd *xhci)
1712 {
1713 	int num_sp;
1714 	int i;
1715 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
1716 
1717 	if (!xhci->scratchpad)
1718 		return;
1719 
1720 	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
1721 
1722 	for (i = 0; i < num_sp; i++) {
1723 		dma_free_coherent(dev, xhci->page_size,
1724 				    xhci->scratchpad->sp_buffers[i],
1725 				    xhci->scratchpad->sp_dma_buffers[i]);
1726 	}
1727 	kfree(xhci->scratchpad->sp_dma_buffers);
1728 	kfree(xhci->scratchpad->sp_buffers);
1729 	dma_free_coherent(dev, num_sp * sizeof(u64),
1730 			    xhci->scratchpad->sp_array,
1731 			    xhci->scratchpad->sp_dma);
1732 	kfree(xhci->scratchpad);
1733 	xhci->scratchpad = NULL;
1734 }
1735 
1736 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
1737 		bool allocate_in_ctx, bool allocate_completion,
1738 		gfp_t mem_flags)
1739 {
1740 	struct xhci_command *command;
1741 
1742 	command = kzalloc(sizeof(*command), mem_flags);
1743 	if (!command)
1744 		return NULL;
1745 
1746 	if (allocate_in_ctx) {
1747 		command->in_ctx =
1748 			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
1749 					mem_flags);
1750 		if (!command->in_ctx) {
1751 			kfree(command);
1752 			return NULL;
1753 		}
1754 	}
1755 
1756 	if (allocate_completion) {
1757 		command->completion =
1758 			kzalloc(sizeof(struct completion), mem_flags);
1759 		if (!command->completion) {
1760 			xhci_free_container_ctx(xhci, command->in_ctx);
1761 			kfree(command);
1762 			return NULL;
1763 		}
1764 		init_completion(command->completion);
1765 	}
1766 
1767 	command->status = 0;
1768 	INIT_LIST_HEAD(&command->cmd_list);
1769 	return command;
1770 }
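
/*
 * Typical usage sketch (illustrative only): a caller that needs both an
 * input context and somewhere to wait allocates with both flags set,
 * queues the command, and sleeps on the completion:
 *
 *	cmd = xhci_alloc_command(xhci, true, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... queue the command, ring the host controller doorbell ...
 *	wait_for_completion(cmd->completion);
 *	xhci_free_command(xhci, cmd);
 */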
1771 
1772 void xhci_urb_free_priv(struct urb_priv *urb_priv)
1773 {
1774 	if (urb_priv) {
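		/* The TDs were allocated as one contiguous block in
		 * xhci_urb_enqueue(), with td[0] pointing at its start,
		 * so this single kfree() releases every TD.
		 */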
1775 		kfree(urb_priv->td[0]);
1776 		kfree(urb_priv);
1777 	}
1778 }
1779 
1780 void xhci_free_command(struct xhci_hcd *xhci,
1781 		struct xhci_command *command)
1782 {
1783 	xhci_free_container_ctx(xhci,
1784 			command->in_ctx);
1785 	kfree(command->completion);
1786 	kfree(command);
1787 }
1788 
1789 void xhci_mem_cleanup(struct xhci_hcd *xhci)
1790 {
1791 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
1792 	int size;
1793 	int i, j, num_ports;
1794 
1795 	del_timer_sync(&xhci->cmd_timer);
1796 
1797 	/* Free the Event Ring Segment Table and the actual Event Ring */
1798 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
1799 	if (xhci->erst.entries)
1800 		dma_free_coherent(dev, size,
1801 				xhci->erst.entries, xhci->erst.erst_dma_addr);
1802 	xhci->erst.entries = NULL;
1803 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
1804 	if (xhci->event_ring)
1805 		xhci_ring_free(xhci, xhci->event_ring);
1806 	xhci->event_ring = NULL;
1807 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
1808 
1809 	if (xhci->lpm_command)
1810 		xhci_free_command(xhci, xhci->lpm_command);
1811 	xhci->lpm_command = NULL;
1812 	if (xhci->cmd_ring)
1813 		xhci_ring_free(xhci, xhci->cmd_ring);
1814 	xhci->cmd_ring = NULL;
1815 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
1816 	xhci_cleanup_command_queue(xhci);
1817 
1818 	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1819 	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
1820 		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1821 		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1822 			struct list_head *ep = &bwt->interval_bw[j].endpoints;
1823 			while (!list_empty(ep))
1824 				list_del_init(ep->next);
1825 		}
1826 	}
1827 
1828 	for (i = 1; i < MAX_HC_SLOTS; ++i)
1829 		xhci_free_virt_device(xhci, i);
1830 
1831 	if (xhci->segment_pool)
1832 		dma_pool_destroy(xhci->segment_pool);
1833 	xhci->segment_pool = NULL;
1834 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
1835 
1836 	if (xhci->device_pool)
1837 		dma_pool_destroy(xhci->device_pool);
1838 	xhci->device_pool = NULL;
1839 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
1840 
1841 	if (xhci->small_streams_pool)
1842 		dma_pool_destroy(xhci->small_streams_pool);
1843 	xhci->small_streams_pool = NULL;
1844 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1845 			"Freed small stream array pool");
1846 
1847 	if (xhci->medium_streams_pool)
1848 		dma_pool_destroy(xhci->medium_streams_pool);
1849 	xhci->medium_streams_pool = NULL;
1850 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
1851 			"Freed medium stream array pool");
1852 
1853 	if (xhci->dcbaa)
1854 		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
1855 				xhci->dcbaa, xhci->dcbaa->dma);
1856 	xhci->dcbaa = NULL;
1857 
1858 	scratchpad_free(xhci);
1859 
1860 	if (!xhci->rh_bw)
1861 		goto no_bw;
1862 
1863 	for (i = 0; i < num_ports; i++) {
1864 		struct xhci_tt_bw_info *tt, *n;
1865 		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1866 			list_del(&tt->tt_list);
1867 			kfree(tt);
1868 		}
1869 	}
1870 
1871 no_bw:
1872 	xhci->cmd_ring_reserved_trbs = 0;
1873 	xhci->num_usb2_ports = 0;
1874 	xhci->num_usb3_ports = 0;
1875 	xhci->num_active_eps = 0;
1876 	kfree(xhci->usb2_ports);
1877 	kfree(xhci->usb3_ports);
1878 	kfree(xhci->port_array);
1879 	kfree(xhci->rh_bw);
1880 	kfree(xhci->ext_caps);
1881 
1882 	xhci->page_size = 0;
1883 	xhci->page_shift = 0;
1884 	xhci->bus_state[0].bus_suspended = 0;
1885 	xhci->bus_state[1].bus_suspended = 0;
1886 }
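
/*
 * Note on the teardown order above: data structures are freed roughly in
 * the reverse of the order xhci_mem_init() allocates them, and the
 * interval bandwidth lists are only unlinked (list_del_init) rather than
 * freed, because the endpoints on those lists are embedded in the virt
 * devices released by xhci_free_virt_device().
 */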
1887 
1888 static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
1889 		struct xhci_segment *input_seg,
1890 		union xhci_trb *start_trb,
1891 		union xhci_trb *end_trb,
1892 		dma_addr_t input_dma,
1893 		struct xhci_segment *result_seg,
1894 		char *test_name, int test_number)
1895 {
1896 	unsigned long long start_dma;
1897 	unsigned long long end_dma;
1898 	struct xhci_segment *seg;
1899 
1900 	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
1901 	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
1902 
1903 	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
1904 	if (seg != result_seg) {
1905 		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
1906 				test_name, test_number);
1907 		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
1908 				"input DMA 0x%llx\n",
1909 				input_seg,
1910 				(unsigned long long) input_dma);
1911 		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
1912 				"ending TRB %p (0x%llx DMA)\n",
1913 				start_trb, start_dma,
1914 				end_trb, end_dma);
1915 		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
1916 				result_seg, seg);
1917 		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
1918 			  true);
1919 		return -1;
1920 	}
1921 	return 0;
1922 }
1923 
1924 /* TRB math checks for trb_in_td(), using the command and event rings. */
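/*
 * Each TRB is 16 bytes, so the TRB at index i within a segment lives at
 * seg->dma + i * 16; that is what the "* 16" offsets in the test vectors
 * below compute.  For example, with TRBS_PER_SEGMENT == 64 the last TRB
 * of a segment starts at seg->dma + 63 * 16 = seg->dma + 0x3f0.
 */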
1925 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1926 {
1927 	struct {
1928 		dma_addr_t		input_dma;
1929 		struct xhci_segment	*result_seg;
1930 	} simple_test_vector[] = {
1931 		/* A zeroed DMA field should fail */
1932 		{ 0, NULL },
1933 		/* One TRB before the ring start should fail */
1934 		{ xhci->event_ring->first_seg->dma - 16, NULL },
1935 		/* One byte before the ring start should fail */
1936 		{ xhci->event_ring->first_seg->dma - 1, NULL },
1937 		/* Starting TRB should succeed */
1938 		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1939 		/* Ending TRB should succeed */
1940 		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1941 			xhci->event_ring->first_seg },
1942 		/* One byte after the ring end should fail */
1943 		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1944 		/* One TRB after the ring end should fail */
1945 		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1946 		/* An address of all ones should fail */
1947 		{ (dma_addr_t) (~0), NULL },
1948 	};
1949 	struct {
1950 		struct xhci_segment	*input_seg;
1951 		union xhci_trb		*start_trb;
1952 		union xhci_trb		*end_trb;
1953 		dma_addr_t		input_dma;
1954 		struct xhci_segment	*result_seg;
1955 	} complex_test_vector[] = {
1956 		/* Test feeding a valid DMA address from a different ring */
1957 		{	.input_seg = xhci->event_ring->first_seg,
1958 			.start_trb = xhci->event_ring->first_seg->trbs,
1959 			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1960 			.input_dma = xhci->cmd_ring->first_seg->dma,
1961 			.result_seg = NULL,
1962 		},
1963 		/* Test feeding a valid end TRB from a different ring */
1964 		{	.input_seg = xhci->event_ring->first_seg,
1965 			.start_trb = xhci->event_ring->first_seg->trbs,
1966 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1967 			.input_dma = xhci->cmd_ring->first_seg->dma,
1968 			.result_seg = NULL,
1969 		},
1970 		/* Test feeding a valid start and end TRB from a different ring */
1971 		{	.input_seg = xhci->event_ring->first_seg,
1972 			.start_trb = xhci->cmd_ring->first_seg->trbs,
1973 			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1974 			.input_dma = xhci->cmd_ring->first_seg->dma,
1975 			.result_seg = NULL,
1976 		},
1977 		/* TRB in this ring, but after this TD */
1978 		{	.input_seg = xhci->event_ring->first_seg,
1979 			.start_trb = &xhci->event_ring->first_seg->trbs[0],
1980 			.end_trb = &xhci->event_ring->first_seg->trbs[3],
1981 			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
1982 			.result_seg = NULL,
1983 		},
1984 		/* TRB in this ring, but before this TD */
1985 		{	.input_seg = xhci->event_ring->first_seg,
1986 			.start_trb = &xhci->event_ring->first_seg->trbs[3],
1987 			.end_trb = &xhci->event_ring->first_seg->trbs[6],
1988 			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
1989 			.result_seg = NULL,
1990 		},
1991 		/* TRB in this ring, but after this wrapped TD */
1992 		{	.input_seg = xhci->event_ring->first_seg,
1993 			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1994 			.end_trb = &xhci->event_ring->first_seg->trbs[1],
1995 			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
1996 			.result_seg = NULL,
1997 		},
1998 		/* TRB in this ring, but before this wrapped TD */
1999 		{	.input_seg = xhci->event_ring->first_seg,
2000 			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2001 			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2002 			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
2003 			.result_seg = NULL,
2004 		},
2005 		/* TRB not in this ring, and we have a wrapped TD */
2006 		{	.input_seg = xhci->event_ring->first_seg,
2007 			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
2008 			.end_trb = &xhci->event_ring->first_seg->trbs[1],
2009 			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
2010 			.result_seg = NULL,
2011 		},
2012 	};
2013 
2014 	unsigned int num_tests;
2015 	int i, ret;
2016 
2017 	num_tests = ARRAY_SIZE(simple_test_vector);
2018 	for (i = 0; i < num_tests; i++) {
2019 		ret = xhci_test_trb_in_td(xhci,
2020 				xhci->event_ring->first_seg,
2021 				xhci->event_ring->first_seg->trbs,
2022 				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
2023 				simple_test_vector[i].input_dma,
2024 				simple_test_vector[i].result_seg,
2025 				"Simple", i);
2026 		if (ret < 0)
2027 			return ret;
2028 	}
2029 
2030 	num_tests = ARRAY_SIZE(complex_test_vector);
2031 	for (i = 0; i < num_tests; i++) {
2032 		ret = xhci_test_trb_in_td(xhci,
2033 				complex_test_vector[i].input_seg,
2034 				complex_test_vector[i].start_trb,
2035 				complex_test_vector[i].end_trb,
2036 				complex_test_vector[i].input_dma,
2037 				complex_test_vector[i].result_seg,
2038 				"Complex", i);
2039 		if (ret < 0)
2040 			return ret;
2041 	}
2042 	xhci_dbg(xhci, "TRB math tests passed.\n");
2043 	return 0;
2044 }
2045 
2046 static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
2047 {
2048 	u64 temp;
2049 	dma_addr_t deq;
2050 
2051 	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2052 			xhci->event_ring->dequeue);
2053 	if (deq == 0 && !in_interrupt())
2054 		xhci_warn(xhci, "WARN: something wrong with SW event ring "
2055 				"dequeue ptr.\n");
2056 	/* Update HC event ring dequeue pointer */
2057 	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2058 	temp &= ERST_PTR_MASK;
2059 	/* Don't clear the EHB bit (which is RW1C): writing a 0 to a RW1C
2060 	 * bit leaves it set, and there might be more events to service.
2061 	 */
2062 	temp &= ~ERST_EHB;
2063 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2064 			"// Write event ring dequeue pointer, "
2065 			"preserving EHB bit");
2066 	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
2067 			&xhci->ir_set->erst_dequeue);
2068 }
2069 
2070 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
2071 		__le32 __iomem *addr, u8 major_revision, int max_caps)
2072 {
2073 	u32 temp, port_offset, port_count;
2074 	int i;
2075 
2076 	if (major_revision > 0x03) {
2077 		xhci_warn(xhci, "Ignoring unknown port speed, "
2078 				"Ext Cap %p, revision = 0x%x\n",
2079 				addr, major_revision);
2080 		/* Ignoring port protocol we can't understand. FIXME */
2081 		return;
2082 	}
2083 
2084 	/* Port offset and count in the third dword, see section 7.2 */
2085 	temp = readl(addr + 2);
2086 	port_offset = XHCI_EXT_PORT_OFF(temp);
2087 	port_count = XHCI_EXT_PORT_COUNT(temp);
2088 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2089 			"Ext Cap %p, port offset = %u, "
2090 			"count = %u, revision = 0x%x",
2091 			addr, port_offset, port_count, major_revision);
2092 	/* Port count includes the current port offset */
2093 	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
2094 		/* WTF? "Valid values are ‘1’ to MaxPorts" */
2095 		return;
2096 
2097 	/* cache usb2 port capabilities */
2098 	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
2099 		xhci->ext_caps[xhci->num_ext_caps++] = temp;
2100 
2101 	/* Check the host's USB2 LPM capability */
2102 	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
2103 			(temp & XHCI_L1C)) {
2104 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2105 				"xHCI 0.96: support USB2 software lpm");
2106 		xhci->sw_lpm_support = 1;
2107 	}
2108 
2109 	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
2110 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2111 				"xHCI 1.0: support USB2 software lpm");
2112 		xhci->sw_lpm_support = 1;
2113 		if (temp & XHCI_HLC) {
2114 			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2115 					"xHCI 1.0: support USB2 hardware lpm");
2116 			xhci->hw_lpm_support = 1;
2117 		}
2118 	}
2119 
2120 	port_offset--;
2121 	for (i = port_offset; i < (port_offset + port_count); i++) {
2122 		/* Duplicate entry.  Ignore the port if the revisions differ. */
2123 		if (xhci->port_array[i] != 0) {
2124 			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
2125 					" port %u\n", addr, i);
2126 			xhci_warn(xhci, "Port was marked as USB %u, "
2127 					"duplicated as USB %u\n",
2128 					xhci->port_array[i], major_revision);
2129 			/* Only adjust the roothub port counts if we haven't
2130 			 * found a similar duplicate.
2131 			 */
2132 			if (xhci->port_array[i] != major_revision &&
2133 				xhci->port_array[i] != DUPLICATE_ENTRY) {
2134 				if (xhci->port_array[i] == 0x03)
2135 					xhci->num_usb3_ports--;
2136 				else
2137 					xhci->num_usb2_ports--;
2138 				xhci->port_array[i] = DUPLICATE_ENTRY;
2139 			}
2140 			/* FIXME: Should we disable the port? */
2141 			continue;
2142 		}
2143 		xhci->port_array[i] = major_revision;
2144 		if (major_revision == 0x03)
2145 			xhci->num_usb3_ports++;
2146 		else
2147 			xhci->num_usb2_ports++;
2148 	}
2149 	/* FIXME: Should we disable ports not in the Extended Capabilities? */
2150 }
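
/*
 * Worked example of the decoding above (illustrative values): a
 * Supported Protocol Capability whose third dword decodes to
 * port_offset = 5 and port_count = 2, with major_revision = 0x03,
 * marks ports 5 and 6 (port_array indexes 4 and 5) as USB 3.0 and
 * increments num_usb3_ports twice.
 */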
2151 
2152 /*
2153  * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
2154  * specify which speed each port supports.  We can't count on the port
2155  * speed bits in the PORTSC register being correct until a device is connected,
2156  * but we need to set up the two fake roothubs with the correct number of USB
2157  * 3.0 and USB 2.0 ports at host controller initialization time.
2158  */
2159 static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
2160 {
2161 	__le32 __iomem *addr, *tmp_addr;
2162 	u32 offset, tmp_offset;
2163 	unsigned int num_ports;
2164 	int i, j, port_index;
2165 	int cap_count = 0;
2166 
2167 	addr = &xhci->cap_regs->hcc_params;
2168 	offset = XHCI_HCC_EXT_CAPS(readl(addr));
2169 	if (offset == 0) {
2170 		xhci_err(xhci, "No Extended Capability registers, "
2171 				"unable to set up roothub.\n");
2172 		return -ENODEV;
2173 	}
2174 
2175 	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
2176 	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
2177 	if (!xhci->port_array)
2178 		return -ENOMEM;
2179 
2180 	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
2181 	if (!xhci->rh_bw)
2182 		return -ENOMEM;
2183 	for (i = 0; i < num_ports; i++) {
2184 		struct xhci_interval_bw_table *bw_table;
2185 
2186 		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
2187 		bw_table = &xhci->rh_bw[i].bw_table;
2188 		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
2189 			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
2190 	}
2191 
2192 	/*
2193 	 * For whatever reason, the first capability offset is from the
2194 	 * capability register base, not from the HCCPARAMS register.
2195 	 * See section 5.3.6 for offset calculation.
2196 	 */
2197 	addr = &xhci->cap_regs->hc_capbase + offset;
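	/*
	 * The offset is in 32-bit words and addr is a __le32 pointer, so
	 * the pointer arithmetic above scales it: e.g. an xECP field of
	 * 0x140 puts the first capability 0x140 * 4 = 0x500 bytes past
	 * hc_capbase.
	 */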
2198 
2199 	tmp_addr = addr;
2200 	tmp_offset = offset;
2201 
2202 	/* count extended protocol capability entries for later caching */
2203 	do {
2204 		u32 cap_id;
2205 		cap_id = readl(tmp_addr);
2206 		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2207 			cap_count++;
2208 		tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
2209 		tmp_addr += tmp_offset;
2210 	} while (tmp_offset);
2211 
2212 	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
2213 	if (!xhci->ext_caps)
2214 		return -ENOMEM;
2215 
2216 	while (1) {
2217 		u32 cap_id;
2218 
2219 		cap_id = readl(addr);
2220 		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
2221 			xhci_add_in_port(xhci, num_ports, addr,
2222 					(u8) XHCI_EXT_PORT_MAJOR(cap_id),
2223 					cap_count);
2224 		offset = XHCI_EXT_CAPS_NEXT(cap_id);
2225 		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
2226 				== num_ports)
2227 			break;
2228 		/*
2229 		 * Once you're into the Extended Capabilities, the offset is
2230 		 * always relative to the register holding the offset.
2231 		 */
2232 		addr += offset;
2233 	}
2234 
2235 	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
2236 		xhci_warn(xhci, "No ports on the roothubs?\n");
2237 		return -ENODEV;
2238 	}
2239 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2240 			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
2241 			xhci->num_usb2_ports, xhci->num_usb3_ports);
2242 
2243 	/* Place limits on the number of roothub ports so that the hub
2244 	 * descriptors aren't longer than the USB core will allocate.
2245 	 */
2246 	if (xhci->num_usb3_ports > 15) {
2247 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2248 				"Limiting USB 3.0 roothub ports to 15.");
2249 		xhci->num_usb3_ports = 15;
2250 	}
2251 	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
2252 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2253 				"Limiting USB 2.0 roothub ports to %u.",
2254 				USB_MAXCHILDREN);
2255 		xhci->num_usb2_ports = USB_MAXCHILDREN;
2256 	}
2257 
2258 	/*
2259 	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
2260 	 * Not sure how the USB core will handle a hub with no ports...
2261 	 */
2262 	if (xhci->num_usb2_ports) {
2263 		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
2264 				xhci->num_usb2_ports, flags);
2265 		if (!xhci->usb2_ports)
2266 			return -ENOMEM;
2267 
2268 		port_index = 0;
2269 		for (i = 0; i < num_ports; i++) {
2270 			if (xhci->port_array[i] == 0x03 ||
2271 					xhci->port_array[i] == 0 ||
2272 					xhci->port_array[i] == DUPLICATE_ENTRY)
2273 				continue;
2274 
2275 			xhci->usb2_ports[port_index] =
2276 				&xhci->op_regs->port_status_base +
2277 				NUM_PORT_REGS*i;
2278 			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2279 					"USB 2.0 port at index %u, "
2280 					"addr = %p", i,
2281 					xhci->usb2_ports[port_index]);
2282 			port_index++;
2283 			if (port_index == xhci->num_usb2_ports)
2284 				break;
2285 		}
2286 	}
2287 	if (xhci->num_usb3_ports) {
2288 		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
2289 				xhci->num_usb3_ports, flags);
2290 		if (!xhci->usb3_ports)
2291 			return -ENOMEM;
2292 
2293 		port_index = 0;
2294 		for (i = 0; i < num_ports; i++)
2295 			if (xhci->port_array[i] == 0x03) {
2296 				xhci->usb3_ports[port_index] =
2297 					&xhci->op_regs->port_status_base +
2298 					NUM_PORT_REGS*i;
2299 				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2300 						"USB 3.0 port at index %u, "
2301 						"addr = %p", i,
2302 						xhci->usb3_ports[port_index]);
2303 				port_index++;
2304 				if (port_index == xhci->num_usb3_ports)
2305 					break;
2306 			}
2307 	}
2308 	return 0;
2309 }
2310 
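/*
 * Allocate and wire up every host controller data structure: the DCBAA,
 * the ring segment and context DMA pools, the command ring, the event
 * ring with its segment table, the scratchpad buffers, and the roothub
 * port arrays.  On any failure the partially built state is torn down
 * through xhci_mem_cleanup().
 */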
2311 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2312 {
2313 	dma_addr_t	dma;
2314 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
2315 	unsigned int	val, val2;
2316 	u64		val_64;
2317 	struct xhci_segment	*seg;
2318 	u32 page_size, temp;
2319 	int i;
2320 
2321 	INIT_LIST_HEAD(&xhci->cmd_list);
2322 
2323 	page_size = readl(&xhci->op_regs->page_size);
2324 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2325 			"Supported page size register = 0x%x", page_size);
2326 	for (i = 0; i < 16; i++) {
2327 		if ((0x1 & page_size) != 0)
2328 			break;
2329 		page_size = page_size >> 1;
2330 	}
2331 	if (i < 16)
2332 		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2333 			"Supported page size of %iK", (1 << (i+12)) / 1024);
2334 	else
2335 		xhci_warn(xhci, "WARN: no supported page size\n");
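	/*
	 * Worked example: a PAGESIZE register value of 0x1 has bit 0 set,
	 * so the loop above exits with i == 0 and the supported page size
	 * is 1 << (0 + 12) = 4096 bytes.
	 */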
2336 	/* Use 4K pages, since that's common and the minimum the HC supports */
2337 	xhci->page_shift = 12;
2338 	xhci->page_size = 1 << xhci->page_shift;
2339 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2340 			"HCD page size set to %iK", xhci->page_size / 1024);
2341 
2342 	/*
2343 	 * Program the Number of Device Slots Enabled field in the CONFIG
2344 	 * register with the max value of slots the HC can handle.
2345 	 */
2346 	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
2347 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2348 			"// xHC can handle at most %d device slots.", val);
2349 	val2 = readl(&xhci->op_regs->config_reg);
2350 	val |= (val2 & ~HCS_SLOTS_MASK);
2351 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2352 			"// Setting Max device slots reg = 0x%x.", val);
2353 	writel(val, &xhci->op_regs->config_reg);
2354 
2355 	/*
2356 	 * Section 6.1 - the Device Context Base Address Array must be
2357 	 * "physically contiguous and 64-byte (cache line) aligned".
2358 	 */
2359 	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
2360 			GFP_KERNEL);
2361 	if (!xhci->dcbaa)
2362 		goto fail;
2363 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
2364 	xhci->dcbaa->dma = dma;
2365 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2366 			"// Device context base array address = 0x%llx (DMA), %p (virt)",
2367 			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
2368 	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
2369 
2370 	/*
2371 	 * Initialize the ring segment pool.  The ring must be a contiguous
2372 	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
2373 	 * however, the command ring segment needs 64-byte aligned segments
2374 	 * and our use of dma addresses in the trb_address_map radix tree needs
2375 	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
2376 	 */
2377 	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
2378 			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);
2379 
2380 	/* See Table 46 and Note on Figure 55 */
2381 	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2382 			2112, 64, xhci->page_size);
2383 	if (!xhci->segment_pool || !xhci->device_pool)
2384 		goto fail;
2385 
2386 	/* Linear stream context arrays don't have any boundary restrictions,
2387 	 * and only need to be 16-byte aligned.
2388 	 */
2389 	xhci->small_streams_pool =
2390 		dma_pool_create("xHCI 256 byte stream ctx arrays",
2391 			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
2392 	xhci->medium_streams_pool =
2393 		dma_pool_create("xHCI 1KB stream ctx arrays",
2394 			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
2395 	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
2396 	 * will be allocated with dma_alloc_coherent()
2397 	 */
2398 
2399 	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
2400 		goto fail;
2401 
2402 	/* Set up the command ring to have one segment for now. */
2403 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
2404 	if (!xhci->cmd_ring)
2405 		goto fail;
2406 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2407 			"Allocated command ring at %p", xhci->cmd_ring);
2408 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
2409 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
2410 
2411 	/* Set the address in the Command Ring Control register */
2412 	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
2413 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
2414 		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
2415 		xhci->cmd_ring->cycle_state;
2416 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2417 			"// Setting command ring address to 0x%016llx", val_64);
2418 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
2419 	xhci_dbg_cmd_ptrs(xhci);
2420 
2421 	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
2422 	if (!xhci->lpm_command)
2423 		goto fail;
2424 
2425 	/* Reserve one command ring TRB for disabling LPM.
2426 	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
2427 	 * disabling LPM, we only need to reserve one TRB for all devices.
2428 	 */
2429 	xhci->cmd_ring_reserved_trbs++;
2430 
2431 	val = readl(&xhci->cap_regs->db_off);
2432 	val &= DBOFF_MASK;
2433 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2434 			"// Doorbell array is located at offset 0x%x"
2435 			" from cap regs base addr", val);
2436 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
2437 	xhci_dbg_regs(xhci);
2438 	xhci_print_run_regs(xhci);
2439 	/* Set ir_set to interrupt register set 0 */
2440 	xhci->ir_set = &xhci->run_regs->ir_set[0];
2441 
2442 	/*
2443 	 * Event ring setup: Allocate a normal ring, but also setup
2444 	 * the event ring segment table (ERST).  Section 4.9.3.
2445 	 */
2446 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
2447 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
2448 						flags);
2449 	if (!xhci->event_ring)
2450 		goto fail;
2451 	if (xhci_check_trb_in_td_math(xhci) < 0)
2452 		goto fail;
2453 
2454 	xhci->erst.entries = dma_alloc_coherent(dev,
2455 			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
2456 			GFP_KERNEL);
2457 	if (!xhci->erst.entries)
2458 		goto fail;
2459 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2460 			"// Allocated event ring segment table at 0x%llx",
2461 			(unsigned long long)dma);
2462 
2463 	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
2464 	xhci->erst.num_entries = ERST_NUM_SEGS;
2465 	xhci->erst.erst_dma_addr = dma;
2466 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2467 			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
2468 			xhci->erst.num_entries,
2469 			xhci->erst.entries,
2470 			(unsigned long long)xhci->erst.erst_dma_addr);
2471 
2472 	/* set ring base address and size for each segment table entry */
2473 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
2474 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
2475 		entry->seg_addr = cpu_to_le64(seg->dma);
2476 		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
2477 		entry->rsvd = 0;
2478 		seg = seg->next;
2479 	}
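
	/*
	 * Resulting layout sketch: with ERST_NUM_SEGS == 1 the table has a
	 * single entry whose seg_addr holds the event ring segment's DMA
	 * address and whose seg_size reports TRBS_PER_SEGMENT TRBs.
	 */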
2480 
2481 	/* set ERST count with the number of entries in the segment table */
2482 	val = readl(&xhci->ir_set->erst_size);
2483 	val &= ERST_SIZE_MASK;
2484 	val |= ERST_NUM_SEGS;
2485 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2486 			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
2487 			val);
2488 	writel(val, &xhci->ir_set->erst_size);
2489 
2490 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2491 			"// Set ERST entries to point to event ring.");
2492 	/* set the segment table base address */
2493 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2494 			"// Set ERST base address for ir_set 0 = 0x%llx",
2495 			(unsigned long long)xhci->erst.erst_dma_addr);
2496 	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
2497 	val_64 &= ERST_PTR_MASK;
2498 	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
2499 	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
2500 
2501 	/* Set the event ring dequeue address */
2502 	xhci_set_hc_event_deq(xhci);
2503 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
2504 			"Wrote ERST address to ir_set 0.");
2505 	xhci_print_ir_set(xhci, 0);
2506 
2507 	/* init command timeout timer */
2508 	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
2509 		    (unsigned long)xhci);
2510 
2511 	/*
2512 	 * XXX: Might need to set the Interrupter Moderation Register to
2513 	 * something other than the default (~1ms minimum between interrupts).
2514 	 * See section 5.5.1.2.
2515 	 */
2516 	init_completion(&xhci->addr_dev);
2517 	for (i = 0; i < MAX_HC_SLOTS; ++i)
2518 		xhci->devs[i] = NULL;
2519 	for (i = 0; i < USB_MAXCHILDREN; ++i) {
2520 		xhci->bus_state[0].resume_done[i] = 0;
2521 		xhci->bus_state[1].resume_done[i] = 0;
2522 		/* Only the USB 2.0 completions will ever be used. */
2523 		init_completion(&xhci->bus_state[1].rexit_done[i]);
2524 	}
2525 
2526 	if (scratchpad_alloc(xhci, flags))
2527 		goto fail;
2528 	if (xhci_setup_port_arrays(xhci, flags))
2529 		goto fail;
2530 
2531 	/* Enable USB 3.0 device notifications for function remote wake, which
2532 	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
2533 	 * U3 (device suspend).
2534 	 */
2535 	temp = readl(&xhci->op_regs->dev_notification);
2536 	temp &= ~DEV_NOTE_MASK;
2537 	temp |= DEV_NOTE_FWAKE;
2538 	writel(temp, &xhci->op_regs->dev_notification);
2539 
2540 	return 0;
2541 
2542 fail:
2543 	xhci_warn(xhci, "Couldn't initialize memory\n");
2544 	xhci_halt(xhci);
2545 	xhci_reset(xhci);
2546 	xhci_mem_cleanup(xhci);
2547 	return -ENOMEM;
2548 }
2549