xref: /openbmc/linux/drivers/usb/cdns3/cdnsp-mem.c (revision 3d82904559f4f5a2622db1b21de3edf2eded7664)
1*3d829045SPawel Laszczak // SPDX-License-Identifier: GPL-2.0
2*3d829045SPawel Laszczak /*
3*3d829045SPawel Laszczak  * Cadence CDNSP DRD Driver.
4*3d829045SPawel Laszczak  *
5*3d829045SPawel Laszczak  * Copyright (C) 2020 Cadence.
6*3d829045SPawel Laszczak  *
7*3d829045SPawel Laszczak  * Author: Pawel Laszczak <pawell@cadence.com>
8*3d829045SPawel Laszczak  *
9*3d829045SPawel Laszczak  * Code based on Linux XHCI driver.
10*3d829045SPawel Laszczak  * Origin: Copyright (C) 2008 Intel Corp.
11*3d829045SPawel Laszczak  */
12*3d829045SPawel Laszczak 
13*3d829045SPawel Laszczak #include <linux/dma-mapping.h>
14*3d829045SPawel Laszczak #include <linux/dmapool.h>
15*3d829045SPawel Laszczak #include <linux/slab.h>
16*3d829045SPawel Laszczak #include <linux/usb.h>
17*3d829045SPawel Laszczak 
18*3d829045SPawel Laszczak #include "cdnsp-gadget.h"
19*3d829045SPawel Laszczak 
20*3d829045SPawel Laszczak static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
21*3d829045SPawel Laszczak 				   struct cdnsp_ep *pep);
22*3d829045SPawel Laszczak /*
23*3d829045SPawel Laszczak  * Allocates a generic ring segment from the ring pool, sets the dma address,
24*3d829045SPawel Laszczak  * initializes the segment to zero, and sets the private next pointer to NULL.
25*3d829045SPawel Laszczak  *
26*3d829045SPawel Laszczak  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
27*3d829045SPawel Laszczak  */
28*3d829045SPawel Laszczak static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
29*3d829045SPawel Laszczak 						 unsigned int cycle_state,
30*3d829045SPawel Laszczak 						 unsigned int max_packet,
31*3d829045SPawel Laszczak 						 gfp_t flags)
32*3d829045SPawel Laszczak {
33*3d829045SPawel Laszczak 	struct cdnsp_segment *seg;
34*3d829045SPawel Laszczak 	dma_addr_t dma;
35*3d829045SPawel Laszczak 	int i;
36*3d829045SPawel Laszczak 
37*3d829045SPawel Laszczak 	seg = kzalloc(sizeof(*seg), flags);
38*3d829045SPawel Laszczak 	if (!seg)
39*3d829045SPawel Laszczak 		return NULL;
40*3d829045SPawel Laszczak 
41*3d829045SPawel Laszczak 	seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
42*3d829045SPawel Laszczak 	if (!seg->trbs) {
43*3d829045SPawel Laszczak 		kfree(seg);
44*3d829045SPawel Laszczak 		return NULL;
45*3d829045SPawel Laszczak 	}
46*3d829045SPawel Laszczak 
47*3d829045SPawel Laszczak 	if (max_packet) {
48*3d829045SPawel Laszczak 		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
49*3d829045SPawel Laszczak 		if (!seg->bounce_buf)
50*3d829045SPawel Laszczak 			goto free_dma;
51*3d829045SPawel Laszczak 	}
52*3d829045SPawel Laszczak 
53*3d829045SPawel Laszczak 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
54*3d829045SPawel Laszczak 	if (cycle_state == 0) {
55*3d829045SPawel Laszczak 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
56*3d829045SPawel Laszczak 			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
57*3d829045SPawel Laszczak 	}
58*3d829045SPawel Laszczak 	seg->dma = dma;
59*3d829045SPawel Laszczak 	seg->next = NULL;
60*3d829045SPawel Laszczak 
61*3d829045SPawel Laszczak 	return seg;
62*3d829045SPawel Laszczak 
63*3d829045SPawel Laszczak free_dma:
64*3d829045SPawel Laszczak 	dma_pool_free(pdev->segment_pool, seg->trbs, dma);
65*3d829045SPawel Laszczak 	kfree(seg);
66*3d829045SPawel Laszczak 
67*3d829045SPawel Laszczak 	return NULL;
68*3d829045SPawel Laszczak }
69*3d829045SPawel Laszczak 
70*3d829045SPawel Laszczak static void cdnsp_segment_free(struct cdnsp_device *pdev,
71*3d829045SPawel Laszczak 			       struct cdnsp_segment *seg)
72*3d829045SPawel Laszczak {
73*3d829045SPawel Laszczak 	if (seg->trbs)
74*3d829045SPawel Laszczak 		dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
75*3d829045SPawel Laszczak 
76*3d829045SPawel Laszczak 	kfree(seg->bounce_buf);
77*3d829045SPawel Laszczak 	kfree(seg);
78*3d829045SPawel Laszczak }
79*3d829045SPawel Laszczak 
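/* Free every segment of a (circular) ring, starting from @first. */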
80*3d829045SPawel Laszczak static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
81*3d829045SPawel Laszczak 					 struct cdnsp_segment *first)
82*3d829045SPawel Laszczak {
83*3d829045SPawel Laszczak 	struct cdnsp_segment *seg;
84*3d829045SPawel Laszczak 
85*3d829045SPawel Laszczak 	seg = first->next;
86*3d829045SPawel Laszczak 
87*3d829045SPawel Laszczak 	while (seg != first) {
88*3d829045SPawel Laszczak 		struct cdnsp_segment *next = seg->next;
89*3d829045SPawel Laszczak 
90*3d829045SPawel Laszczak 		cdnsp_segment_free(pdev, seg);
91*3d829045SPawel Laszczak 		seg = next;
92*3d829045SPawel Laszczak 	}
93*3d829045SPawel Laszczak 
94*3d829045SPawel Laszczak 	cdnsp_segment_free(pdev, first);
95*3d829045SPawel Laszczak }
96*3d829045SPawel Laszczak 
97*3d829045SPawel Laszczak /*
98*3d829045SPawel Laszczak  * Make the prev segment point to the next segment.
99*3d829045SPawel Laszczak  *
100*3d829045SPawel Laszczak  * Change the last TRB in the prev segment to be a Link TRB which points to the
101*3d829045SPawel Laszczak  * DMA address of the next segment. The caller needs to set any Link TRB
102*3d829045SPawel Laszczak  * related flags, such as End TRB, Toggle Cycle, and no snoop.
103*3d829045SPawel Laszczak  */
104*3d829045SPawel Laszczak static void cdnsp_link_segments(struct cdnsp_device *pdev,
105*3d829045SPawel Laszczak 				struct cdnsp_segment *prev,
106*3d829045SPawel Laszczak 				struct cdnsp_segment *next,
107*3d829045SPawel Laszczak 				enum cdnsp_ring_type type)
108*3d829045SPawel Laszczak {
109*3d829045SPawel Laszczak 	struct cdnsp_link_trb *link;
110*3d829045SPawel Laszczak 	u32 val;
111*3d829045SPawel Laszczak 
112*3d829045SPawel Laszczak 	if (!prev || !next)
113*3d829045SPawel Laszczak 		return;
114*3d829045SPawel Laszczak 
115*3d829045SPawel Laszczak 	prev->next = next;
116*3d829045SPawel Laszczak 	if (type != TYPE_EVENT) {
117*3d829045SPawel Laszczak 		link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
118*3d829045SPawel Laszczak 		link->segment_ptr = cpu_to_le64(next->dma);
119*3d829045SPawel Laszczak 
120*3d829045SPawel Laszczak 		/*
121*3d829045SPawel Laszczak 		 * Set the last TRB in the segment to have a TRB type ID
122*3d829045SPawel Laszczak 		 * of Link TRB
123*3d829045SPawel Laszczak 		 */
124*3d829045SPawel Laszczak 		val = le32_to_cpu(link->control);
125*3d829045SPawel Laszczak 		val &= ~TRB_TYPE_BITMASK;
126*3d829045SPawel Laszczak 		val |= TRB_TYPE(TRB_LINK);
127*3d829045SPawel Laszczak 		link->control = cpu_to_le32(val);
128*3d829045SPawel Laszczak 	}
129*3d829045SPawel Laszczak }
130*3d829045SPawel Laszczak 
131*3d829045SPawel Laszczak /*
132*3d829045SPawel Laszczak  * Link the ring to the new segments.
133*3d829045SPawel Laszczak  * Set Toggle Cycle for the new ring if needed.
134*3d829045SPawel Laszczak  */
135*3d829045SPawel Laszczak static void cdnsp_link_rings(struct cdnsp_device *pdev,
136*3d829045SPawel Laszczak 			     struct cdnsp_ring *ring,
137*3d829045SPawel Laszczak 			     struct cdnsp_segment *first,
138*3d829045SPawel Laszczak 			     struct cdnsp_segment *last,
139*3d829045SPawel Laszczak 			     unsigned int num_segs)
140*3d829045SPawel Laszczak {
141*3d829045SPawel Laszczak 	struct cdnsp_segment *next;
142*3d829045SPawel Laszczak 
143*3d829045SPawel Laszczak 	if (!ring || !first || !last)
144*3d829045SPawel Laszczak 		return;
145*3d829045SPawel Laszczak 
146*3d829045SPawel Laszczak 	next = ring->enq_seg->next;
147*3d829045SPawel Laszczak 	cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
148*3d829045SPawel Laszczak 	cdnsp_link_segments(pdev, last, next, ring->type);
149*3d829045SPawel Laszczak 	ring->num_segs += num_segs;
150*3d829045SPawel Laszczak 	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
151*3d829045SPawel Laszczak 
152*3d829045SPawel Laszczak 	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
153*3d829045SPawel Laszczak 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
154*3d829045SPawel Laszczak 			~cpu_to_le32(LINK_TOGGLE);
155*3d829045SPawel Laszczak 		last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
156*3d829045SPawel Laszczak 			cpu_to_le32(LINK_TOGGLE);
157*3d829045SPawel Laszczak 		ring->last_seg = last;
158*3d829045SPawel Laszczak 	}
159*3d829045SPawel Laszczak }
160*3d829045SPawel Laszczak 
161*3d829045SPawel Laszczak /*
162*3d829045SPawel Laszczak  * We need a radix tree for mapping physical addresses of TRBs to which stream
163*3d829045SPawel Laszczak  * ID they belong to. We need to do this because the device controller won't
164*3d829045SPawel Laszczak  * tell us which stream ring the TRB came from. We could store the stream ID
165*3d829045SPawel Laszczak  * in an event data TRB, but that doesn't help us for the cancellation case,
166*3d829045SPawel Laszczak  * since the endpoint may stop before it reaches that event data TRB.
167*3d829045SPawel Laszczak  *
168*3d829045SPawel Laszczak  * The radix tree maps the upper portion of the TRB DMA address to a ring
169*3d829045SPawel Laszczak  * segment that has the same upper portion of DMA addresses. For example,
170*3d829045SPawel Laszczak  * say I have segments of size 1KB, that are always 1KB aligned. A segment may
171*3d829045SPawel Laszczak  * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
172*3d829045SPawel Laszczak  * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
173*3d829045SPawel Laszczak  * pass the radix tree a key to get the right stream ID:
174*3d829045SPawel Laszczak  *
175*3d829045SPawel Laszczak  *	0x10c90fff >> 10 = 0x43243
176*3d829045SPawel Laszczak  *	0x10c912c0 >> 10 = 0x43244
177*3d829045SPawel Laszczak  *	0x10c91400 >> 10 = 0x43245
178*3d829045SPawel Laszczak  *
179*3d829045SPawel Laszczak  * Obviously, only those TRBs with DMA addresses that are within the segment
180*3d829045SPawel Laszczak  * will make the radix tree return the stream ID for that ring.
181*3d829045SPawel Laszczak  *
182*3d829045SPawel Laszczak  * Caveats for the radix tree:
183*3d829045SPawel Laszczak  *
184*3d829045SPawel Laszczak  * The radix tree uses an unsigned long as a key. On 32-bit systems, an
185*3d829045SPawel Laszczak  * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
186*3d829045SPawel Laszczak  * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
187*3d829045SPawel Laszczak  * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
188*3d829045SPawel Laszczak  * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
189*3d829045SPawel Laszczak  * extended systems (where the DMA address can be bigger than 32-bits),
190*3d829045SPawel Laszczak  * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
191*3d829045SPawel Laszczak  */
192*3d829045SPawel Laszczak static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
193*3d829045SPawel Laszczak 					struct cdnsp_ring *ring,
194*3d829045SPawel Laszczak 					struct cdnsp_segment *seg,
195*3d829045SPawel Laszczak 					gfp_t mem_flags)
196*3d829045SPawel Laszczak {
197*3d829045SPawel Laszczak 	unsigned long key;
198*3d829045SPawel Laszczak 	int ret;
199*3d829045SPawel Laszczak 
200*3d829045SPawel Laszczak 	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
201*3d829045SPawel Laszczak 
202*3d829045SPawel Laszczak 	/* Skip any segments that were already added. */
203*3d829045SPawel Laszczak 	if (radix_tree_lookup(trb_address_map, key))
204*3d829045SPawel Laszczak 		return 0;
205*3d829045SPawel Laszczak 
206*3d829045SPawel Laszczak 	ret = radix_tree_maybe_preload(mem_flags);
207*3d829045SPawel Laszczak 	if (ret)
208*3d829045SPawel Laszczak 		return ret;
209*3d829045SPawel Laszczak 
210*3d829045SPawel Laszczak 	ret = radix_tree_insert(trb_address_map, key, ring);
211*3d829045SPawel Laszczak 	radix_tree_preload_end();
212*3d829045SPawel Laszczak 
213*3d829045SPawel Laszczak 	return ret;
214*3d829045SPawel Laszczak }
215*3d829045SPawel Laszczak 
216*3d829045SPawel Laszczak static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
217*3d829045SPawel Laszczak 					 struct cdnsp_segment *seg)
218*3d829045SPawel Laszczak {
219*3d829045SPawel Laszczak 	unsigned long key;
220*3d829045SPawel Laszczak 
221*3d829045SPawel Laszczak 	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
222*3d829045SPawel Laszczak 	if (radix_tree_lookup(trb_address_map, key))
223*3d829045SPawel Laszczak 		radix_tree_delete(trb_address_map, key);
224*3d829045SPawel Laszczak }
225*3d829045SPawel Laszczak 
226*3d829045SPawel Laszczak static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
227*3d829045SPawel Laszczak 					       struct cdnsp_ring *ring,
228*3d829045SPawel Laszczak 					       struct cdnsp_segment *first_seg,
229*3d829045SPawel Laszczak 					       struct cdnsp_segment *last_seg,
230*3d829045SPawel Laszczak 					       gfp_t mem_flags)
231*3d829045SPawel Laszczak {
232*3d829045SPawel Laszczak 	struct cdnsp_segment *failed_seg;
233*3d829045SPawel Laszczak 	struct cdnsp_segment *seg;
234*3d829045SPawel Laszczak 	int ret;
235*3d829045SPawel Laszczak 
236*3d829045SPawel Laszczak 	seg = first_seg;
237*3d829045SPawel Laszczak 	do {
238*3d829045SPawel Laszczak 		ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
239*3d829045SPawel Laszczak 						   mem_flags);
240*3d829045SPawel Laszczak 		if (ret)
241*3d829045SPawel Laszczak 			goto remove_streams;
242*3d829045SPawel Laszczak 		if (seg == last_seg)
243*3d829045SPawel Laszczak 			return 0;
244*3d829045SPawel Laszczak 		seg = seg->next;
245*3d829045SPawel Laszczak 	} while (seg != first_seg);
246*3d829045SPawel Laszczak 
247*3d829045SPawel Laszczak 	return 0;
248*3d829045SPawel Laszczak 
249*3d829045SPawel Laszczak remove_streams:
250*3d829045SPawel Laszczak 	failed_seg = seg;
251*3d829045SPawel Laszczak 	seg = first_seg;
252*3d829045SPawel Laszczak 	do {
253*3d829045SPawel Laszczak 		cdnsp_remove_segment_mapping(trb_address_map, seg);
254*3d829045SPawel Laszczak 		if (seg == failed_seg)
255*3d829045SPawel Laszczak 			return ret;
256*3d829045SPawel Laszczak 		seg = seg->next;
257*3d829045SPawel Laszczak 	} while (seg != first_seg);
258*3d829045SPawel Laszczak 
259*3d829045SPawel Laszczak 	return ret;
260*3d829045SPawel Laszczak }
261*3d829045SPawel Laszczak 
262*3d829045SPawel Laszczak static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
263*3d829045SPawel Laszczak {
264*3d829045SPawel Laszczak 	struct cdnsp_segment *seg;
265*3d829045SPawel Laszczak 
266*3d829045SPawel Laszczak 	seg = ring->first_seg;
267*3d829045SPawel Laszczak 	do {
268*3d829045SPawel Laszczak 		cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
269*3d829045SPawel Laszczak 		seg = seg->next;
270*3d829045SPawel Laszczak 	} while (seg != ring->first_seg);
271*3d829045SPawel Laszczak }
272*3d829045SPawel Laszczak 
273*3d829045SPawel Laszczak static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
274*3d829045SPawel Laszczak {
275*3d829045SPawel Laszczak 	return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
276*3d829045SPawel Laszczak 			ring->first_seg, ring->last_seg, GFP_ATOMIC);
277*3d829045SPawel Laszczak }
278*3d829045SPawel Laszczak 
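/* Drop any stream mappings, free all ring segments and then the ring itself. */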
279*3d829045SPawel Laszczak static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
280*3d829045SPawel Laszczak {
281*3d829045SPawel Laszczak 	if (!ring)
282*3d829045SPawel Laszczak 		return;
283*3d829045SPawel Laszczak 
284*3d829045SPawel Laszczak 	if (ring->first_seg) {
285*3d829045SPawel Laszczak 		if (ring->type == TYPE_STREAM)
286*3d829045SPawel Laszczak 			cdnsp_remove_stream_mapping(ring);
287*3d829045SPawel Laszczak 
288*3d829045SPawel Laszczak 		cdnsp_free_segments_for_ring(pdev, ring->first_seg);
289*3d829045SPawel Laszczak 	}
290*3d829045SPawel Laszczak 
291*3d829045SPawel Laszczak 	kfree(ring);
292*3d829045SPawel Laszczak }
293*3d829045SPawel Laszczak 
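/*
 * Reset the enqueue/dequeue pointers to the first TRB of the first segment and
 * set the initial cycle state and number of free TRBs.
 */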
294*3d829045SPawel Laszczak void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
295*3d829045SPawel Laszczak {
296*3d829045SPawel Laszczak 	ring->enqueue = ring->first_seg->trbs;
297*3d829045SPawel Laszczak 	ring->enq_seg = ring->first_seg;
298*3d829045SPawel Laszczak 	ring->dequeue = ring->enqueue;
299*3d829045SPawel Laszczak 	ring->deq_seg = ring->first_seg;
300*3d829045SPawel Laszczak 
301*3d829045SPawel Laszczak 	/*
302*3d829045SPawel Laszczak 	 * The ring is initialized to 0. The producer must write 1 to the cycle
303*3d829045SPawel Laszczak 	 * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
304*3d829045SPawel Laszczak 	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
305*3d829045SPawel Laszczak 	 *
306*3d829045SPawel Laszczak 	 * New rings are initialized with cycle state equal to 1; if we are
307*3d829045SPawel Laszczak 	 * handling ring expansion, set the cycle state equal to the old ring's.
308*3d829045SPawel Laszczak 	 */
309*3d829045SPawel Laszczak 	ring->cycle_state = 1;
310*3d829045SPawel Laszczak 
311*3d829045SPawel Laszczak 	/*
312*3d829045SPawel Laszczak 	 * Each segment has a link TRB, and leave an extra TRB for SW
313*3d829045SPawel Laszczak 	 * Each segment has a Link TRB, and we leave an extra TRB for SW
314*3d829045SPawel Laszczak 	 * accounting purposes.
315*3d829045SPawel Laszczak 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
316*3d829045SPawel Laszczak }
317*3d829045SPawel Laszczak 
318*3d829045SPawel Laszczak /* Allocate segments and link them for a ring. */
319*3d829045SPawel Laszczak static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
320*3d829045SPawel Laszczak 					 struct cdnsp_segment **first,
321*3d829045SPawel Laszczak 					 struct cdnsp_segment **last,
322*3d829045SPawel Laszczak 					 unsigned int num_segs,
323*3d829045SPawel Laszczak 					 unsigned int cycle_state,
324*3d829045SPawel Laszczak 					 enum cdnsp_ring_type type,
325*3d829045SPawel Laszczak 					 unsigned int max_packet,
326*3d829045SPawel Laszczak 					 gfp_t flags)
327*3d829045SPawel Laszczak {
328*3d829045SPawel Laszczak 	struct cdnsp_segment *prev;
329*3d829045SPawel Laszczak 
330*3d829045SPawel Laszczak 	/* Allocate first segment. */
331*3d829045SPawel Laszczak 	prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
332*3d829045SPawel Laszczak 	if (!prev)
333*3d829045SPawel Laszczak 		return -ENOMEM;
334*3d829045SPawel Laszczak 
335*3d829045SPawel Laszczak 	num_segs--;
336*3d829045SPawel Laszczak 	*first = prev;
337*3d829045SPawel Laszczak 
338*3d829045SPawel Laszczak 	/* Allocate all other segments. */
339*3d829045SPawel Laszczak 	while (num_segs > 0) {
340*3d829045SPawel Laszczak 		struct cdnsp_segment	*next;
341*3d829045SPawel Laszczak 
342*3d829045SPawel Laszczak 		next = cdnsp_segment_alloc(pdev, cycle_state,
343*3d829045SPawel Laszczak 					   max_packet, flags);
344*3d829045SPawel Laszczak 		if (!next) {
345*3d829045SPawel Laszczak 			cdnsp_free_segments_for_ring(pdev, *first);
346*3d829045SPawel Laszczak 			return -ENOMEM;
347*3d829045SPawel Laszczak 		}
348*3d829045SPawel Laszczak 
349*3d829045SPawel Laszczak 		cdnsp_link_segments(pdev, prev, next, type);
350*3d829045SPawel Laszczak 
351*3d829045SPawel Laszczak 		prev = next;
352*3d829045SPawel Laszczak 		num_segs--;
353*3d829045SPawel Laszczak 	}
354*3d829045SPawel Laszczak 
355*3d829045SPawel Laszczak 	cdnsp_link_segments(pdev, prev, *first, type);
356*3d829045SPawel Laszczak 	*last = prev;
357*3d829045SPawel Laszczak 
358*3d829045SPawel Laszczak 	return 0;
359*3d829045SPawel Laszczak }
360*3d829045SPawel Laszczak 
361*3d829045SPawel Laszczak /*
362*3d829045SPawel Laszczak  * Create a new ring with zero or more segments.
363*3d829045SPawel Laszczak  *
364*3d829045SPawel Laszczak  * Link each segment together into a ring.
365*3d829045SPawel Laszczak  * Set the end flag and the cycle toggle bit on the last segment.
366*3d829045SPawel Laszczak  */
367*3d829045SPawel Laszczak static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
368*3d829045SPawel Laszczak 					   unsigned int num_segs,
369*3d829045SPawel Laszczak 					   enum cdnsp_ring_type type,
370*3d829045SPawel Laszczak 					   unsigned int max_packet,
371*3d829045SPawel Laszczak 					   gfp_t flags)
372*3d829045SPawel Laszczak {
373*3d829045SPawel Laszczak 	struct cdnsp_ring *ring;
374*3d829045SPawel Laszczak 	int ret;
375*3d829045SPawel Laszczak 
376*3d829045SPawel Laszczak 	ring = kzalloc(sizeof(*ring), flags);
377*3d829045SPawel Laszczak 	if (!ring)
378*3d829045SPawel Laszczak 		return NULL;
379*3d829045SPawel Laszczak 
380*3d829045SPawel Laszczak 	ring->num_segs = num_segs;
381*3d829045SPawel Laszczak 	ring->bounce_buf_len = max_packet;
382*3d829045SPawel Laszczak 	INIT_LIST_HEAD(&ring->td_list);
383*3d829045SPawel Laszczak 	ring->type = type;
384*3d829045SPawel Laszczak 
385*3d829045SPawel Laszczak 	if (num_segs == 0)
386*3d829045SPawel Laszczak 		return ring;
387*3d829045SPawel Laszczak 
388*3d829045SPawel Laszczak 	ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
389*3d829045SPawel Laszczak 					    &ring->last_seg, num_segs,
390*3d829045SPawel Laszczak 					    1, type, max_packet, flags);
391*3d829045SPawel Laszczak 	if (ret)
392*3d829045SPawel Laszczak 		goto fail;
393*3d829045SPawel Laszczak 
394*3d829045SPawel Laszczak 	/* Only the event ring does not use a Link TRB. */
395*3d829045SPawel Laszczak 	if (type != TYPE_EVENT)
396*3d829045SPawel Laszczak 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
397*3d829045SPawel Laszczak 			cpu_to_le32(LINK_TOGGLE);
398*3d829045SPawel Laszczak 
399*3d829045SPawel Laszczak 	cdnsp_initialize_ring_info(ring);
400*3d829045SPawel Laszczak 
401*3d829045SPawel Laszczak 	return ring;
402*3d829045SPawel Laszczak fail:
403*3d829045SPawel Laszczak 	kfree(ring);
404*3d829045SPawel Laszczak 	return NULL;
405*3d829045SPawel Laszczak }
406*3d829045SPawel Laszczak 
407*3d829045SPawel Laszczak void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
408*3d829045SPawel Laszczak {
409*3d829045SPawel Laszczak 	cdnsp_ring_free(pdev, pep->ring);
410*3d829045SPawel Laszczak 	pep->ring = NULL;
411*3d829045SPawel Laszczak 	cdnsp_free_stream_info(pdev, pep);
412*3d829045SPawel Laszczak }
413*3d829045SPawel Laszczak 
414*3d829045SPawel Laszczak /*
415*3d829045SPawel Laszczak  * Expand an existing ring.
416*3d829045SPawel Laszczak  * Allocate new segments (at least doubling the ring size) and link them into the existing ring.
417*3d829045SPawel Laszczak  */
418*3d829045SPawel Laszczak int cdnsp_ring_expansion(struct cdnsp_device *pdev,
419*3d829045SPawel Laszczak 			 struct cdnsp_ring *ring,
420*3d829045SPawel Laszczak 			 unsigned int num_trbs,
421*3d829045SPawel Laszczak 			 gfp_t flags)
422*3d829045SPawel Laszczak {
423*3d829045SPawel Laszczak 	unsigned int num_segs_needed;
424*3d829045SPawel Laszczak 	struct cdnsp_segment *first;
425*3d829045SPawel Laszczak 	struct cdnsp_segment *last;
426*3d829045SPawel Laszczak 	unsigned int num_segs;
427*3d829045SPawel Laszczak 	int ret;
428*3d829045SPawel Laszczak 
429*3d829045SPawel Laszczak 	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
430*3d829045SPawel Laszczak 			(TRBS_PER_SEGMENT - 1);
431*3d829045SPawel Laszczak 
432*3d829045SPawel Laszczak 	/* Allocate the number of segments needed or double the ring size, whichever is larger. */
433*3d829045SPawel Laszczak 	num_segs = max(ring->num_segs, num_segs_needed);
434*3d829045SPawel Laszczak 
435*3d829045SPawel Laszczak 	ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
436*3d829045SPawel Laszczak 					    ring->cycle_state, ring->type,
437*3d829045SPawel Laszczak 					    ring->bounce_buf_len, flags);
438*3d829045SPawel Laszczak 	if (ret)
439*3d829045SPawel Laszczak 		return -ENOMEM;
440*3d829045SPawel Laszczak 
441*3d829045SPawel Laszczak 	if (ring->type == TYPE_STREAM)
442*3d829045SPawel Laszczak 		ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
443*3d829045SPawel Laszczak 							  ring, first,
444*3d829045SPawel Laszczak 							  last, flags);
445*3d829045SPawel Laszczak 
446*3d829045SPawel Laszczak 	if (ret) {
447*3d829045SPawel Laszczak 		cdnsp_free_segments_for_ring(pdev, first);
448*3d829045SPawel Laszczak 
449*3d829045SPawel Laszczak 		return ret;
450*3d829045SPawel Laszczak 	}
451*3d829045SPawel Laszczak 
452*3d829045SPawel Laszczak 	cdnsp_link_rings(pdev, ring, first, last, num_segs);
453*3d829045SPawel Laszczak 
454*3d829045SPawel Laszczak 	return 0;
455*3d829045SPawel Laszczak }
456*3d829045SPawel Laszczak 
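/*
 * Allocate the output (device) context and the input context from the device
 * context pool. The input context additionally holds the input control
 * context, hence the extra ctx_size bytes.
 */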
457*3d829045SPawel Laszczak static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
458*3d829045SPawel Laszczak {
459*3d829045SPawel Laszczak 	int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;
460*3d829045SPawel Laszczak 
461*3d829045SPawel Laszczak 	pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
462*3d829045SPawel Laszczak 	pdev->out_ctx.size = size;
463*3d829045SPawel Laszczak 	pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
464*3d829045SPawel Laszczak 	pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
465*3d829045SPawel Laszczak 					      &pdev->out_ctx.dma);
466*3d829045SPawel Laszczak 
467*3d829045SPawel Laszczak 	if (!pdev->out_ctx.bytes)
468*3d829045SPawel Laszczak 		return -ENOMEM;
469*3d829045SPawel Laszczak 
470*3d829045SPawel Laszczak 	pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
471*3d829045SPawel Laszczak 	pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
472*3d829045SPawel Laszczak 	pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
473*3d829045SPawel Laszczak 	pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
474*3d829045SPawel Laszczak 					     &pdev->in_ctx.dma);
475*3d829045SPawel Laszczak 
476*3d829045SPawel Laszczak 	if (!pdev->in_ctx.bytes) {
477*3d829045SPawel Laszczak 		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
478*3d829045SPawel Laszczak 			      pdev->out_ctx.dma);
479*3d829045SPawel Laszczak 		return -ENOMEM;
480*3d829045SPawel Laszczak 	}
481*3d829045SPawel Laszczak 
482*3d829045SPawel Laszczak 	return 0;
483*3d829045SPawel Laszczak }
484*3d829045SPawel Laszczak 
485*3d829045SPawel Laszczak struct cdnsp_input_control_ctx
486*3d829045SPawel Laszczak 	*cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
487*3d829045SPawel Laszczak {
488*3d829045SPawel Laszczak 	if (ctx->type != CDNSP_CTX_TYPE_INPUT)
489*3d829045SPawel Laszczak 		return NULL;
490*3d829045SPawel Laszczak 
491*3d829045SPawel Laszczak 	return (struct cdnsp_input_control_ctx *)ctx->bytes;
492*3d829045SPawel Laszczak }
493*3d829045SPawel Laszczak 
494*3d829045SPawel Laszczak struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
495*3d829045SPawel Laszczak {
496*3d829045SPawel Laszczak 	if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
497*3d829045SPawel Laszczak 		return (struct cdnsp_slot_ctx *)ctx->bytes;
498*3d829045SPawel Laszczak 
499*3d829045SPawel Laszczak 	return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
500*3d829045SPawel Laszczak }
501*3d829045SPawel Laszczak 
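/*
 * In the output context, index 0 holds the slot context and endpoint contexts
 * start at index 1. The input context has an extra input control context at
 * index 0, so everything there is shifted by one.
 */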
502*3d829045SPawel Laszczak struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
503*3d829045SPawel Laszczak 				      unsigned int ep_index)
504*3d829045SPawel Laszczak {
505*3d829045SPawel Laszczak 	/* Increment ep index by offset of start of ep ctx array. */
506*3d829045SPawel Laszczak 	ep_index++;
507*3d829045SPawel Laszczak 	if (ctx->type == CDNSP_CTX_TYPE_INPUT)
508*3d829045SPawel Laszczak 		ep_index++;
509*3d829045SPawel Laszczak 
510*3d829045SPawel Laszczak 	return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
511*3d829045SPawel Laszczak }
512*3d829045SPawel Laszczak 
513*3d829045SPawel Laszczak static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
514*3d829045SPawel Laszczak 				  struct cdnsp_ep *pep)
515*3d829045SPawel Laszczak {
516*3d829045SPawel Laszczak 	dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
517*3d829045SPawel Laszczak 		      pep->stream_info.ctx_array_dma);
518*3d829045SPawel Laszczak }
519*3d829045SPawel Laszczak 
520*3d829045SPawel Laszczak /* The number of stream context array entries must be a power of 2. */
521*3d829045SPawel Laszczak static struct cdnsp_stream_ctx
522*3d829045SPawel Laszczak 	*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
523*3d829045SPawel Laszczak {
524*3d829045SPawel Laszczak 	size_t size = sizeof(struct cdnsp_stream_ctx) *
525*3d829045SPawel Laszczak 		      pep->stream_info.num_stream_ctxs;
526*3d829045SPawel Laszczak 
527*3d829045SPawel Laszczak 	if (size > CDNSP_CTX_SIZE)
528*3d829045SPawel Laszczak 		return NULL;
529*3d829045SPawel Laszczak 
530*3d829045SPawel Laszczak 	/*
531*3d829045SPawel Laszczak 	 * The driver intentionally uses the device_pool to allocate the stream
532*3d829045SPawel Laszczak 	 * context array. The device pool is 2048 bytes in size, which gives us
533*3d829045SPawel Laszczak 	 * 128 entries.
534*3d829045SPawel Laszczak 	 */
535*3d829045SPawel Laszczak 	return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
536*3d829045SPawel Laszczak 			       &pep->stream_info.ctx_array_dma);
537*3d829045SPawel Laszczak }
538*3d829045SPawel Laszczak 
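/*
 * Map a TRB DMA address back to its transfer ring. For stream endpoints the
 * ring is looked up in the radix tree keyed by the upper bits of the address;
 * otherwise the endpoint has a single transfer ring.
 */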
539*3d829045SPawel Laszczak struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
540*3d829045SPawel Laszczak {
541*3d829045SPawel Laszczak 	if (pep->ep_state & EP_HAS_STREAMS)
542*3d829045SPawel Laszczak 		return radix_tree_lookup(&pep->stream_info.trb_address_map,
543*3d829045SPawel Laszczak 					 address >> TRB_SEGMENT_SHIFT);
544*3d829045SPawel Laszczak 
545*3d829045SPawel Laszczak 	return pep->ring;
546*3d829045SPawel Laszczak }
547*3d829045SPawel Laszczak 
548*3d829045SPawel Laszczak /*
549*3d829045SPawel Laszczak  * Change an endpoint's internal structure so it supports stream IDs.
550*3d829045SPawel Laszczak  * The number of requested streams includes stream 0, which cannot be used by
551*3d829045SPawel Laszczak  * the driver.
552*3d829045SPawel Laszczak  *
553*3d829045SPawel Laszczak  * The number of stream contexts in the stream context array may be bigger than
554*3d829045SPawel Laszczak  * the number of streams the driver wants to use. This is because the number of
555*3d829045SPawel Laszczak  * stream context array entries must be a power of two.
556*3d829045SPawel Laszczak  */
557*3d829045SPawel Laszczak int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
558*3d829045SPawel Laszczak 			    struct cdnsp_ep *pep,
559*3d829045SPawel Laszczak 			    unsigned int num_stream_ctxs,
560*3d829045SPawel Laszczak 			    unsigned int num_streams)
561*3d829045SPawel Laszczak {
562*3d829045SPawel Laszczak 	struct cdnsp_stream_info *stream_info;
563*3d829045SPawel Laszczak 	struct cdnsp_ring *cur_ring;
564*3d829045SPawel Laszczak 	u32 cur_stream;
565*3d829045SPawel Laszczak 	u64 addr;
566*3d829045SPawel Laszczak 	int ret;
567*3d829045SPawel Laszczak 	int mps;
568*3d829045SPawel Laszczak 
569*3d829045SPawel Laszczak 	stream_info = &pep->stream_info;
570*3d829045SPawel Laszczak 	stream_info->num_streams = num_streams;
571*3d829045SPawel Laszczak 	stream_info->num_stream_ctxs = num_stream_ctxs;
572*3d829045SPawel Laszczak 
573*3d829045SPawel Laszczak 	/* Initialize the array of virtual pointers to stream rings. */
574*3d829045SPawel Laszczak 	stream_info->stream_rings = kcalloc(num_streams,
575*3d829045SPawel Laszczak 					    sizeof(struct cdnsp_ring *),
576*3d829045SPawel Laszczak 					    GFP_ATOMIC);
577*3d829045SPawel Laszczak 	if (!stream_info->stream_rings)
578*3d829045SPawel Laszczak 		return -ENOMEM;
579*3d829045SPawel Laszczak 
580*3d829045SPawel Laszczak 	/* Initialize the array of DMA addresses for stream rings for the HW. */
581*3d829045SPawel Laszczak 	stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
582*3d829045SPawel Laszczak 	if (!stream_info->stream_ctx_array)
583*3d829045SPawel Laszczak 		goto cleanup_stream_rings;
584*3d829045SPawel Laszczak 
585*3d829045SPawel Laszczak 	memset(stream_info->stream_ctx_array, 0,
586*3d829045SPawel Laszczak 	       sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
587*3d829045SPawel Laszczak 	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
588*3d829045SPawel Laszczak 	mps = usb_endpoint_maxp(pep->endpoint.desc);
589*3d829045SPawel Laszczak 
590*3d829045SPawel Laszczak 	/*
591*3d829045SPawel Laszczak 	 * Allocate rings for all the streams that the driver will use,
592*3d829045SPawel Laszczak 	 * and add their segment DMA addresses to the radix tree.
593*3d829045SPawel Laszczak 	 * Stream 0 is reserved.
594*3d829045SPawel Laszczak 	 */
595*3d829045SPawel Laszczak 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
596*3d829045SPawel Laszczak 		cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
597*3d829045SPawel Laszczak 					    GFP_ATOMIC);
598*3d829045SPawel Laszczak 		stream_info->stream_rings[cur_stream] = cur_ring;
599*3d829045SPawel Laszczak 
600*3d829045SPawel Laszczak 		if (!cur_ring)
601*3d829045SPawel Laszczak 			goto cleanup_rings;
602*3d829045SPawel Laszczak 
603*3d829045SPawel Laszczak 		cur_ring->stream_id = cur_stream;
604*3d829045SPawel Laszczak 		cur_ring->trb_address_map = &stream_info->trb_address_map;
605*3d829045SPawel Laszczak 
606*3d829045SPawel Laszczak 		/* Set deq ptr, cycle bit, and stream context type. */
607*3d829045SPawel Laszczak 		addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
608*3d829045SPawel Laszczak 		       cur_ring->cycle_state;
609*3d829045SPawel Laszczak 
610*3d829045SPawel Laszczak 		stream_info->stream_ctx_array[cur_stream].stream_ring =
611*3d829045SPawel Laszczak 			cpu_to_le64(addr);
612*3d829045SPawel Laszczak 
613*3d829045SPawel Laszczak 		ret = cdnsp_update_stream_mapping(cur_ring);
614*3d829045SPawel Laszczak 		if (ret)
615*3d829045SPawel Laszczak 			goto cleanup_rings;
616*3d829045SPawel Laszczak 	}
617*3d829045SPawel Laszczak 
618*3d829045SPawel Laszczak 	return 0;
619*3d829045SPawel Laszczak 
620*3d829045SPawel Laszczak cleanup_rings:
621*3d829045SPawel Laszczak 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
622*3d829045SPawel Laszczak 		cur_ring = stream_info->stream_rings[cur_stream];
623*3d829045SPawel Laszczak 		if (cur_ring) {
624*3d829045SPawel Laszczak 			cdnsp_ring_free(pdev, cur_ring);
625*3d829045SPawel Laszczak 			stream_info->stream_rings[cur_stream] = NULL;
626*3d829045SPawel Laszczak 		}
627*3d829045SPawel Laszczak 	}
628*3d829045SPawel Laszczak 
629*3d829045SPawel Laszczak cleanup_stream_rings:
630*3d829045SPawel Laszczak 	kfree(pep->stream_info.stream_rings);
631*3d829045SPawel Laszczak 
632*3d829045SPawel Laszczak 	return -ENOMEM;
633*3d829045SPawel Laszczak }
634*3d829045SPawel Laszczak 
635*3d829045SPawel Laszczak /* Frees all stream contexts associated with the endpoint. */
636*3d829045SPawel Laszczak static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
637*3d829045SPawel Laszczak 				   struct cdnsp_ep *pep)
638*3d829045SPawel Laszczak {
639*3d829045SPawel Laszczak 	struct cdnsp_stream_info *stream_info = &pep->stream_info;
640*3d829045SPawel Laszczak 	struct cdnsp_ring *cur_ring;
641*3d829045SPawel Laszczak 	int cur_stream;
642*3d829045SPawel Laszczak 
643*3d829045SPawel Laszczak 	if (!(pep->ep_state & EP_HAS_STREAMS))
644*3d829045SPawel Laszczak 		return;
645*3d829045SPawel Laszczak 
646*3d829045SPawel Laszczak 	for (cur_stream = 1; cur_stream < stream_info->num_streams;
647*3d829045SPawel Laszczak 	     cur_stream++) {
648*3d829045SPawel Laszczak 		cur_ring = stream_info->stream_rings[cur_stream];
649*3d829045SPawel Laszczak 		if (cur_ring) {
650*3d829045SPawel Laszczak 			cdnsp_ring_free(pdev, cur_ring);
651*3d829045SPawel Laszczak 			stream_info->stream_rings[cur_stream] = NULL;
652*3d829045SPawel Laszczak 		}
653*3d829045SPawel Laszczak 	}
654*3d829045SPawel Laszczak 
655*3d829045SPawel Laszczak 	if (stream_info->stream_ctx_array)
656*3d829045SPawel Laszczak 		cdnsp_free_stream_ctx(pdev, pep);
657*3d829045SPawel Laszczak 
658*3d829045SPawel Laszczak 	kfree(stream_info->stream_rings);
659*3d829045SPawel Laszczak 	pep->ep_state &= ~EP_HAS_STREAMS;
660*3d829045SPawel Laszczak }
661*3d829045SPawel Laszczak 
662*3d829045SPawel Laszczak /* All the cdnsp_tds in the ring's TD list should be freed at this point. */
663*3d829045SPawel Laszczak static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
664*3d829045SPawel Laszczak {
665*3d829045SPawel Laszczak 	pdev->dcbaa->dev_context_ptrs[1] = 0;
666*3d829045SPawel Laszczak 
667*3d829045SPawel Laszczak 	cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
668*3d829045SPawel Laszczak 
669*3d829045SPawel Laszczak 	if (pdev->in_ctx.bytes)
670*3d829045SPawel Laszczak 		dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
671*3d829045SPawel Laszczak 			      pdev->in_ctx.dma);
672*3d829045SPawel Laszczak 
673*3d829045SPawel Laszczak 	if (pdev->out_ctx.bytes)
674*3d829045SPawel Laszczak 		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
675*3d829045SPawel Laszczak 			      pdev->out_ctx.dma);
676*3d829045SPawel Laszczak 
677*3d829045SPawel Laszczak 	pdev->in_ctx.bytes = NULL;
678*3d829045SPawel Laszczak 	pdev->out_ctx.bytes = NULL;
679*3d829045SPawel Laszczak }
680*3d829045SPawel Laszczak 
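/*
 * Allocate the device contexts and the default endpoint 0 transfer ring, and
 * hook the output context into the DCBAA so the controller can find it.
 */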
681*3d829045SPawel Laszczak static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev, gfp_t flags)
682*3d829045SPawel Laszczak {
683*3d829045SPawel Laszczak 	int ret = -ENOMEM;
684*3d829045SPawel Laszczak 
685*3d829045SPawel Laszczak 	ret = cdnsp_init_device_ctx(pdev);
686*3d829045SPawel Laszczak 	if (ret)
687*3d829045SPawel Laszczak 		return ret;
688*3d829045SPawel Laszczak 
689*3d829045SPawel Laszczak 	/* Allocate endpoint 0 ring. */
690*3d829045SPawel Laszczak 	pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, flags);
691*3d829045SPawel Laszczak 	if (!pdev->eps[0].ring)
692*3d829045SPawel Laszczak 		goto fail;
693*3d829045SPawel Laszczak 
694*3d829045SPawel Laszczak 	/* Point to output device context in dcbaa. */
695*3d829045SPawel Laszczak 	pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
696*3d829045SPawel Laszczak 	pdev->cmd.in_ctx = &pdev->in_ctx;
697*3d829045SPawel Laszczak 
698*3d829045SPawel Laszczak 	return 0;
699*3d829045SPawel Laszczak fail:
700*3d829045SPawel Laszczak 	dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
701*3d829045SPawel Laszczak 		      pdev->out_ctx.dma);
702*3d829045SPawel Laszczak 	dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
703*3d829045SPawel Laszczak 		      pdev->in_ctx.dma);
704*3d829045SPawel Laszczak 
705*3d829045SPawel Laszczak 	return ret;
706*3d829045SPawel Laszczak }
707*3d829045SPawel Laszczak 
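/*
 * Copy the current ep0 enqueue pointer, together with the ring cycle state,
 * into the dequeue field of the ep0 input context.
 */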
708*3d829045SPawel Laszczak void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
709*3d829045SPawel Laszczak {
710*3d829045SPawel Laszczak 	struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
711*3d829045SPawel Laszczak 	struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
712*3d829045SPawel Laszczak 	dma_addr_t dma;
713*3d829045SPawel Laszczak 
714*3d829045SPawel Laszczak 	dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
715*3d829045SPawel Laszczak 	ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
716*3d829045SPawel Laszczak }
717*3d829045SPawel Laszczak 
718*3d829045SPawel Laszczak /* Set up a controller private device for a Set Address command. */
719*3d829045SPawel Laszczak int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
720*3d829045SPawel Laszczak {
721*3d829045SPawel Laszczak 	struct cdnsp_slot_ctx *slot_ctx;
722*3d829045SPawel Laszczak 	struct cdnsp_ep_ctx *ep0_ctx;
723*3d829045SPawel Laszczak 	u32 max_packets, port;
724*3d829045SPawel Laszczak 
725*3d829045SPawel Laszczak 	ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
726*3d829045SPawel Laszczak 	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
727*3d829045SPawel Laszczak 
728*3d829045SPawel Laszczak 	/* Only the control endpoint is valid - one endpoint context. */
729*3d829045SPawel Laszczak 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
730*3d829045SPawel Laszczak 
731*3d829045SPawel Laszczak 	switch (pdev->gadget.speed) {
732*3d829045SPawel Laszczak 	case USB_SPEED_SUPER_PLUS:
733*3d829045SPawel Laszczak 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
734*3d829045SPawel Laszczak 		max_packets = MAX_PACKET(512);
735*3d829045SPawel Laszczak 		break;
736*3d829045SPawel Laszczak 	case USB_SPEED_SUPER:
737*3d829045SPawel Laszczak 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
738*3d829045SPawel Laszczak 		max_packets = MAX_PACKET(512);
739*3d829045SPawel Laszczak 		break;
740*3d829045SPawel Laszczak 	case USB_SPEED_HIGH:
741*3d829045SPawel Laszczak 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
742*3d829045SPawel Laszczak 		max_packets = MAX_PACKET(64);
743*3d829045SPawel Laszczak 		break;
744*3d829045SPawel Laszczak 	case USB_SPEED_FULL:
745*3d829045SPawel Laszczak 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
746*3d829045SPawel Laszczak 		max_packets = MAX_PACKET(64);
747*3d829045SPawel Laszczak 		break;
748*3d829045SPawel Laszczak 	default:
749*3d829045SPawel Laszczak 		/* Speed was not set, this shouldn't happen. */
750*3d829045SPawel Laszczak 		return -EINVAL;
751*3d829045SPawel Laszczak 	}
752*3d829045SPawel Laszczak 
753*3d829045SPawel Laszczak 	port = DEV_PORT(pdev->active_port->port_num);
754*3d829045SPawel Laszczak 	slot_ctx->dev_port |= cpu_to_le32(port);
755*3d829045SPawel Laszczak 	slot_ctx->dev_state = (pdev->device_address & DEV_ADDR_MASK);
756*3d829045SPawel Laszczak 	ep0_ctx->tx_info = EP_AVG_TRB_LENGTH(0x8);
757*3d829045SPawel Laszczak 	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
758*3d829045SPawel Laszczak 	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
759*3d829045SPawel Laszczak 					 max_packets);
760*3d829045SPawel Laszczak 
761*3d829045SPawel Laszczak 	ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
762*3d829045SPawel Laszczak 				   pdev->eps[0].ring->cycle_state);
763*3d829045SPawel Laszczak 
764*3d829045SPawel Laszczak 	return 0;
765*3d829045SPawel Laszczak }
766*3d829045SPawel Laszczak 
767*3d829045SPawel Laszczak /*
768*3d829045SPawel Laszczak  * Convert interval expressed as 2^(bInterval - 1) == interval into
769*3d829045SPawel Laszczak  * straight exponent value 2^n == interval.
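 *
 * For example, a bInterval of 4 describes an interval of 2^(4 - 1) = 8
 * microframes, so the returned exponent is 3.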
770*3d829045SPawel Laszczak  */
771*3d829045SPawel Laszczak static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
772*3d829045SPawel Laszczak 						  struct cdnsp_ep *pep)
773*3d829045SPawel Laszczak {
774*3d829045SPawel Laszczak 	unsigned int interval;
775*3d829045SPawel Laszczak 
776*3d829045SPawel Laszczak 	interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
777*3d829045SPawel Laszczak 	if (interval != pep->endpoint.desc->bInterval - 1)
778*3d829045SPawel Laszczak 		dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
779*3d829045SPawel Laszczak 			 pep->name, 1 << interval,
780*3d829045SPawel Laszczak 			 g->speed == USB_SPEED_FULL ? "" : "micro");
781*3d829045SPawel Laszczak 
782*3d829045SPawel Laszczak 	/*
783*3d829045SPawel Laszczak 	 * Full speed isoc endpoints specify interval in frames,
784*3d829045SPawel Laszczak 	 * not microframes. We are using microframes everywhere,
785*3d829045SPawel Laszczak 	 * so adjust accordingly.
786*3d829045SPawel Laszczak 	 */
787*3d829045SPawel Laszczak 	if (g->speed == USB_SPEED_FULL)
788*3d829045SPawel Laszczak 		interval += 3;	/* 1 frame = 2^3 uframes */
789*3d829045SPawel Laszczak 
790*3d829045SPawel Laszczak 	/* Controller handles only up to 512ms (2^12). */
791*3d829045SPawel Laszczak 	if (interval > 12)
792*3d829045SPawel Laszczak 		interval = 12;
793*3d829045SPawel Laszczak 
794*3d829045SPawel Laszczak 	return interval;
795*3d829045SPawel Laszczak }
796*3d829045SPawel Laszczak 
797*3d829045SPawel Laszczak /*
798*3d829045SPawel Laszczak  * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
799*3d829045SPawel Laszczak  * microframes, rounded down to nearest power of 2.
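 *
 * For example, a descriptor interval of 9 microframes gives fls(9) - 1 = 3,
 * i.e. an effective interval of 2^3 = 8 microframes.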
800*3d829045SPawel Laszczak  */
801*3d829045SPawel Laszczak static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
802*3d829045SPawel Laszczak 						  struct cdnsp_ep *pep,
803*3d829045SPawel Laszczak 						  unsigned int desc_interval,
804*3d829045SPawel Laszczak 						  unsigned int min_exponent,
805*3d829045SPawel Laszczak 						  unsigned int max_exponent)
806*3d829045SPawel Laszczak {
807*3d829045SPawel Laszczak 	unsigned int interval;
808*3d829045SPawel Laszczak 
809*3d829045SPawel Laszczak 	interval = fls(desc_interval) - 1;
810*3d829045SPawel Laszczak 	return clamp_val(interval, min_exponent, max_exponent);
811*3d829045SPawel Laszczak }
812*3d829045SPawel Laszczak 
813*3d829045SPawel Laszczak /*
814*3d829045SPawel Laszczak  * Return the polling interval.
815*3d829045SPawel Laszczak  *
816*3d829045SPawel Laszczak  * The polling interval is expressed in "microframes". If controllers's Interval
817*3d829045SPawel Laszczak  * The polling interval is expressed in "microframes". If the controller's Interval
818*3d829045SPawel Laszczak  */
819*3d829045SPawel Laszczak static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
820*3d829045SPawel Laszczak 						struct cdnsp_ep *pep)
821*3d829045SPawel Laszczak {
822*3d829045SPawel Laszczak 	unsigned int interval = 0;
823*3d829045SPawel Laszczak 
824*3d829045SPawel Laszczak 	switch (g->speed) {
825*3d829045SPawel Laszczak 	case USB_SPEED_HIGH:
826*3d829045SPawel Laszczak 	case USB_SPEED_SUPER_PLUS:
827*3d829045SPawel Laszczak 	case USB_SPEED_SUPER:
828*3d829045SPawel Laszczak 		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
829*3d829045SPawel Laszczak 		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
830*3d829045SPawel Laszczak 			interval = cdnsp_parse_exponent_interval(g, pep);
831*3d829045SPawel Laszczak 		break;
832*3d829045SPawel Laszczak 	case USB_SPEED_FULL:
833*3d829045SPawel Laszczak 		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
834*3d829045SPawel Laszczak 			interval = cdnsp_parse_exponent_interval(g, pep);
835*3d829045SPawel Laszczak 		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
836*3d829045SPawel Laszczak 			interval = pep->endpoint.desc->bInterval << 3;
837*3d829045SPawel Laszczak 			interval = cdnsp_microframes_to_exponent(g, pep,
838*3d829045SPawel Laszczak 								 interval,
839*3d829045SPawel Laszczak 								 3, 10);
840*3d829045SPawel Laszczak 		}
841*3d829045SPawel Laszczak 
842*3d829045SPawel Laszczak 		break;
843*3d829045SPawel Laszczak 	default:
844*3d829045SPawel Laszczak 		WARN_ON(1);
845*3d829045SPawel Laszczak 	}
846*3d829045SPawel Laszczak 
847*3d829045SPawel Laszczak 	return interval;
848*3d829045SPawel Laszczak }
849*3d829045SPawel Laszczak 
850*3d829045SPawel Laszczak /*
851*3d829045SPawel Laszczak  * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
852*3d829045SPawel Laszczak  * High speed endpoint descriptors can define "the number of additional
853*3d829045SPawel Laszczak  * transaction opportunities per microframe", but that goes in the Max Burst
854*3d829045SPawel Laszczak  * endpoint context field.
855*3d829045SPawel Laszczak  */
856*3d829045SPawel Laszczak static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
857*3d829045SPawel Laszczak {
858*3d829045SPawel Laszczak 	if (g->speed < USB_SPEED_SUPER ||
859*3d829045SPawel Laszczak 	    !usb_endpoint_xfer_isoc(pep->endpoint.desc))
860*3d829045SPawel Laszczak 		return 0;
861*3d829045SPawel Laszczak 
862*3d829045SPawel Laszczak 	return pep->endpoint.comp_desc->bmAttributes;
863*3d829045SPawel Laszczak }
864*3d829045SPawel Laszczak 
865*3d829045SPawel Laszczak static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
866*3d829045SPawel Laszczak 					struct cdnsp_ep *pep)
867*3d829045SPawel Laszczak {
868*3d829045SPawel Laszczak 	/* SuperSpeed and SuperSpeedPlus have max burst in the ep companion desc. */
869*3d829045SPawel Laszczak 	if (g->speed >= USB_SPEED_SUPER)
870*3d829045SPawel Laszczak 		return pep->endpoint.comp_desc->bMaxBurst;
871*3d829045SPawel Laszczak 
872*3d829045SPawel Laszczak 	if (g->speed == USB_SPEED_HIGH &&
873*3d829045SPawel Laszczak 	    (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
874*3d829045SPawel Laszczak 	     usb_endpoint_xfer_int(pep->endpoint.desc)))
875*3d829045SPawel Laszczak 		return (usb_endpoint_maxp(pep->endpoint.desc) & 0x1800) >> 11;
876*3d829045SPawel Laszczak 
877*3d829045SPawel Laszczak 	return 0;
878*3d829045SPawel Laszczak }
879*3d829045SPawel Laszczak 
880*3d829045SPawel Laszczak static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
881*3d829045SPawel Laszczak {
882*3d829045SPawel Laszczak 	int in;
883*3d829045SPawel Laszczak 
884*3d829045SPawel Laszczak 	in = usb_endpoint_dir_in(desc);
885*3d829045SPawel Laszczak 
886*3d829045SPawel Laszczak 	switch (usb_endpoint_type(desc)) {
887*3d829045SPawel Laszczak 	case USB_ENDPOINT_XFER_CONTROL:
888*3d829045SPawel Laszczak 		return CTRL_EP;
889*3d829045SPawel Laszczak 	case USB_ENDPOINT_XFER_BULK:
890*3d829045SPawel Laszczak 		return in ? BULK_IN_EP : BULK_OUT_EP;
891*3d829045SPawel Laszczak 	case USB_ENDPOINT_XFER_ISOC:
892*3d829045SPawel Laszczak 		return in ? ISOC_IN_EP : ISOC_OUT_EP;
893*3d829045SPawel Laszczak 	case USB_ENDPOINT_XFER_INT:
894*3d829045SPawel Laszczak 		return in ? INT_IN_EP : INT_OUT_EP;
895*3d829045SPawel Laszczak 	}
896*3d829045SPawel Laszczak 
897*3d829045SPawel Laszczak 	return 0;
898*3d829045SPawel Laszczak }
899*3d829045SPawel Laszczak 
900*3d829045SPawel Laszczak /*
901*3d829045SPawel Laszczak  * Return the maximum endpoint service interval time (ESIT) payload.
902*3d829045SPawel Laszczak  * Basically, this is the maxpacket size, multiplied by the burst size
903*3d829045SPawel Laszczak  * and mult size.
904*3d829045SPawel Laszczak  */
905*3d829045SPawel Laszczak static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
906*3d829045SPawel Laszczak 				      struct cdnsp_ep *pep)
907*3d829045SPawel Laszczak {
908*3d829045SPawel Laszczak 	int max_packet;
909*3d829045SPawel Laszczak 	int max_burst;
910*3d829045SPawel Laszczak 
911*3d829045SPawel Laszczak 	/* Only applies to interrupt or isochronous endpoints. */
912*3d829045SPawel Laszczak 	if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
913*3d829045SPawel Laszczak 	    usb_endpoint_xfer_bulk(pep->endpoint.desc))
914*3d829045SPawel Laszczak 		return 0;
915*3d829045SPawel Laszczak 
916*3d829045SPawel Laszczak 	/* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
917*3d829045SPawel Laszczak 	if (g->speed >= USB_SPEED_SUPER_PLUS &&
918*3d829045SPawel Laszczak 	    USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
919*3d829045SPawel Laszczak 		return le32_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
920*3d829045SPawel Laszczak 	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT. */
921*3d829045SPawel Laszczak 	else if (g->speed >= USB_SPEED_SUPER)
922*3d829045SPawel Laszczak 		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
923*3d829045SPawel Laszczak 
924*3d829045SPawel Laszczak 	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
925*3d829045SPawel Laszczak 	max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);
926*3d829045SPawel Laszczak 
927*3d829045SPawel Laszczak 	/* A 0 in max burst means 1 transfer per ESIT */
928*3d829045SPawel Laszczak 	return max_packet * max_burst;
929*3d829045SPawel Laszczak }
930*3d829045SPawel Laszczak 
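/*
 * Set up an endpoint: derive the interval, mult, max burst, max packet size
 * and max ESIT payload from the descriptors, allocate the transfer ring and
 * fill the input endpoint context. SuperSpeed(Plus) bulk endpoints may also
 * have stream resources allocated here.
 */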
931*3d829045SPawel Laszczak int cdnsp_endpoint_init(struct cdnsp_device *pdev,
932*3d829045SPawel Laszczak 			struct cdnsp_ep *pep,
933*3d829045SPawel Laszczak 			gfp_t mem_flags)
934*3d829045SPawel Laszczak {
935*3d829045SPawel Laszczak 	enum cdnsp_ring_type ring_type;
936*3d829045SPawel Laszczak 	struct cdnsp_ep_ctx *ep_ctx;
937*3d829045SPawel Laszczak 	unsigned int err_count = 0;
938*3d829045SPawel Laszczak 	unsigned int avg_trb_len;
939*3d829045SPawel Laszczak 	unsigned int max_packet;
940*3d829045SPawel Laszczak 	unsigned int max_burst;
941*3d829045SPawel Laszczak 	unsigned int interval;
942*3d829045SPawel Laszczak 	u32 max_esit_payload;
943*3d829045SPawel Laszczak 	unsigned int mult;
944*3d829045SPawel Laszczak 	u32 endpoint_type;
945*3d829045SPawel Laszczak 	int ret;
946*3d829045SPawel Laszczak 
947*3d829045SPawel Laszczak 	ep_ctx = pep->in_ctx;
948*3d829045SPawel Laszczak 
949*3d829045SPawel Laszczak 	endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
950*3d829045SPawel Laszczak 	if (!endpoint_type)
951*3d829045SPawel Laszczak 		return -EINVAL;
952*3d829045SPawel Laszczak 
953*3d829045SPawel Laszczak 	ring_type = usb_endpoint_type(pep->endpoint.desc);
954*3d829045SPawel Laszczak 
955*3d829045SPawel Laszczak 	/*
956*3d829045SPawel Laszczak 	 * Get values to fill the endpoint context, mostly from ep descriptor.
957*3d829045SPawel Laszczak 	 * The average TRB buffer length for bulk endpoints is unclear as we
958*3d829045SPawel Laszczak 	 * have no clue on scatter gather list entry size. For Isoc and Int,
959*3d829045SPawel Laszczak 	 * set it to max available.
960*3d829045SPawel Laszczak 	 */
961*3d829045SPawel Laszczak 	max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
962*3d829045SPawel Laszczak 	interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
963*3d829045SPawel Laszczak 	mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
964*3d829045SPawel Laszczak 	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
965*3d829045SPawel Laszczak 	max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
966*3d829045SPawel Laszczak 	avg_trb_len = max_esit_payload;
967*3d829045SPawel Laszczak 
968*3d829045SPawel Laszczak 	/* Allow 3 retries for everything but isoc, set CErr = 3. */
969*3d829045SPawel Laszczak 	if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
970*3d829045SPawel Laszczak 		err_count = 3;
971*3d829045SPawel Laszczak 	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
972*3d829045SPawel Laszczak 	    pdev->gadget.speed == USB_SPEED_HIGH)
973*3d829045SPawel Laszczak 		max_packet = 512;
974*3d829045SPawel Laszczak 	/* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
975*3d829045SPawel Laszczak 	if (usb_endpoint_xfer_control(pep->endpoint.desc))
976*3d829045SPawel Laszczak 		avg_trb_len = 8;
977*3d829045SPawel Laszczak 
978*3d829045SPawel Laszczak 	/* Set up the endpoint ring. */
979*3d829045SPawel Laszczak 	pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
	if (!pep->ring)
		return -ENOMEM;

980*3d829045SPawel Laszczak 	pep->skip = false;
981*3d829045SPawel Laszczak 
982*3d829045SPawel Laszczak 	/* Fill the endpoint context */
983*3d829045SPawel Laszczak 	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
984*3d829045SPawel Laszczak 				EP_INTERVAL(interval) | EP_MULT(mult));
985*3d829045SPawel Laszczak 	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
986*3d829045SPawel Laszczak 				MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
987*3d829045SPawel Laszczak 				ERROR_COUNT(err_count));
988*3d829045SPawel Laszczak 	ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
989*3d829045SPawel Laszczak 				  pep->ring->cycle_state);
990*3d829045SPawel Laszczak 
991*3d829045SPawel Laszczak 	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
992*3d829045SPawel Laszczak 				EP_AVG_TRB_LENGTH(avg_trb_len));
993*3d829045SPawel Laszczak 
994*3d829045SPawel Laszczak 	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
995*3d829045SPawel Laszczak 	    pdev->gadget.speed > USB_SPEED_HIGH) {
996*3d829045SPawel Laszczak 		ret = cdnsp_alloc_streams(pdev, pep);
997*3d829045SPawel Laszczak 		if (ret < 0)
998*3d829045SPawel Laszczak 			return ret;
999*3d829045SPawel Laszczak 	}
1000*3d829045SPawel Laszczak 
1001*3d829045SPawel Laszczak 	return 0;
1002*3d829045SPawel Laszczak }
1003*3d829045SPawel Laszczak 
1004*3d829045SPawel Laszczak void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
1005*3d829045SPawel Laszczak {
1006*3d829045SPawel Laszczak 	pep->in_ctx->ep_info = 0;
1007*3d829045SPawel Laszczak 	pep->in_ctx->ep_info2 = 0;
1008*3d829045SPawel Laszczak 	pep->in_ctx->deq = 0;
1009*3d829045SPawel Laszczak 	pep->in_ctx->tx_info = 0;
1010*3d829045SPawel Laszczak }
1011*3d829045SPawel Laszczak 
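/*
 * Allocate the Event Ring Segment Table and fill one entry per event ring
 * segment with that segment's DMA address and size.
 */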
1012*3d829045SPawel Laszczak static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
1013*3d829045SPawel Laszczak 			    struct cdnsp_ring *evt_ring,
1014*3d829045SPawel Laszczak 			    struct cdnsp_erst *erst,
1015*3d829045SPawel Laszczak 			    gfp_t flags)
1016*3d829045SPawel Laszczak {
1017*3d829045SPawel Laszczak 	struct cdnsp_erst_entry *entry;
1018*3d829045SPawel Laszczak 	struct cdnsp_segment *seg;
1019*3d829045SPawel Laszczak 	unsigned int val;
1020*3d829045SPawel Laszczak 	size_t size;
1021*3d829045SPawel Laszczak 
1022*3d829045SPawel Laszczak 	size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
1023*3d829045SPawel Laszczak 	erst->entries = dma_alloc_coherent(pdev->dev, size,
1024*3d829045SPawel Laszczak 					   &erst->erst_dma_addr, flags);
1025*3d829045SPawel Laszczak 	if (!erst->entries)
1026*3d829045SPawel Laszczak 		return -ENOMEM;
1027*3d829045SPawel Laszczak 
1028*3d829045SPawel Laszczak 	erst->num_entries = evt_ring->num_segs;
1029*3d829045SPawel Laszczak 
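	/* Point each ERST entry at one event ring segment, in ring order. */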
1030*3d829045SPawel Laszczak 	seg = evt_ring->first_seg;
1031*3d829045SPawel Laszczak 	for (val = 0; val < evt_ring->num_segs; val++) {
1032*3d829045SPawel Laszczak 		entry = &erst->entries[val];
1033*3d829045SPawel Laszczak 		entry->seg_addr = cpu_to_le64(seg->dma);
1034*3d829045SPawel Laszczak 		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1035*3d829045SPawel Laszczak 		entry->rsvd = 0;
1036*3d829045SPawel Laszczak 		seg = seg->next;
1037*3d829045SPawel Laszczak 	}
1038*3d829045SPawel Laszczak 
1039*3d829045SPawel Laszczak 	return 0;
1040*3d829045SPawel Laszczak }
1041*3d829045SPawel Laszczak 
1042*3d829045SPawel Laszczak static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
1043*3d829045SPawel Laszczak {
1044*3d829045SPawel Laszczak 	size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
1045*3d829045SPawel Laszczak 	struct device *dev = pdev->dev;
1046*3d829045SPawel Laszczak 
1047*3d829045SPawel Laszczak 	if (erst->entries)
1048*3d829045SPawel Laszczak 		dma_free_coherent(dev, size, erst->entries,
1049*3d829045SPawel Laszczak 				  erst->erst_dma_addr);
1050*3d829045SPawel Laszczak 
1051*3d829045SPawel Laszczak 	erst->entries = NULL;
1052*3d829045SPawel Laszczak }
1053*3d829045SPawel Laszczak 
1054*3d829045SPawel Laszczak void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
1055*3d829045SPawel Laszczak {
1056*3d829045SPawel Laszczak 	struct device *dev = pdev->dev;
1057*3d829045SPawel Laszczak 
1058*3d829045SPawel Laszczak 	cdnsp_free_priv_device(pdev);
1059*3d829045SPawel Laszczak 	cdnsp_free_erst(pdev, &pdev->erst);
1060*3d829045SPawel Laszczak 
1061*3d829045SPawel Laszczak 	if (pdev->event_ring)
1062*3d829045SPawel Laszczak 		cdnsp_ring_free(pdev, pdev->event_ring);
1063*3d829045SPawel Laszczak 
1064*3d829045SPawel Laszczak 	pdev->event_ring = NULL;
1065*3d829045SPawel Laszczak 
1066*3d829045SPawel Laszczak 	if (pdev->cmd_ring)
1067*3d829045SPawel Laszczak 		cdnsp_ring_free(pdev, pdev->cmd_ring);
1068*3d829045SPawel Laszczak 
1069*3d829045SPawel Laszczak 	pdev->cmd_ring = NULL;
1070*3d829045SPawel Laszczak 
1071*3d829045SPawel Laszczak 	dma_pool_destroy(pdev->segment_pool);
1072*3d829045SPawel Laszczak 	pdev->segment_pool = NULL;
1073*3d829045SPawel Laszczak 	dma_pool_destroy(pdev->device_pool);
1074*3d829045SPawel Laszczak 	pdev->device_pool = NULL;
1075*3d829045SPawel Laszczak 
1076*3d829045SPawel Laszczak 	if (pdev->dcbaa)
1077*3d829045SPawel Laszczak 		dma_free_coherent(dev, sizeof(*pdev->dcbaa),
1078*3d829045SPawel Laszczak 				  pdev->dcbaa, pdev->dcbaa->dma);
1079*3d829045SPawel Laszczak 
1080*3d829045SPawel Laszczak 	pdev->dcbaa = NULL;
1081*3d829045SPawel Laszczak 
1082*3d829045SPawel Laszczak 	pdev->usb2_port.exist = 0;
1083*3d829045SPawel Laszczak 	pdev->usb3_port.exist = 0;
1084*3d829045SPawel Laszczak 	pdev->usb2_port.port_num = 0;
1085*3d829045SPawel Laszczak 	pdev->usb3_port.port_num = 0;
1086*3d829045SPawel Laszczak 	pdev->active_port = NULL;
1087*3d829045SPawel Laszczak }
1088*3d829045SPawel Laszczak 
1089*3d829045SPawel Laszczak static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
1090*3d829045SPawel Laszczak {
1091*3d829045SPawel Laszczak 	dma_addr_t deq;
1092*3d829045SPawel Laszczak 	u64 temp;
1093*3d829045SPawel Laszczak 
1094*3d829045SPawel Laszczak 	deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
1095*3d829045SPawel Laszczak 				    pdev->event_ring->dequeue);
1096*3d829045SPawel Laszczak 
1097*3d829045SPawel Laszczak 	/* Update controller event ring dequeue pointer */
1098*3d829045SPawel Laszczak 	temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
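	/* Keep only the low flag bits; the pointer bits are rewritten below. */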
1099*3d829045SPawel Laszczak 	temp &= ERST_PTR_MASK;
1100*3d829045SPawel Laszczak 
1101*3d829045SPawel Laszczak 	/*
1102*3d829045SPawel Laszczak 	 * EHB is RW1C: write it back as zero so it is not cleared,
1103*3d829045SPawel Laszczak 	 * because there might be more events to service.
1104*3d829045SPawel Laszczak 	 */
1105*3d829045SPawel Laszczak 	temp &= ~ERST_EHB;
1106*3d829045SPawel Laszczak 
1107*3d829045SPawel Laszczak 	cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
1108*3d829045SPawel Laszczak 		       &pdev->ir_set->erst_dequeue);
1109*3d829045SPawel Laszczak }
1110*3d829045SPawel Laszczak 
1111*3d829045SPawel Laszczak static void cdnsp_add_in_port(struct cdnsp_device *pdev,
1112*3d829045SPawel Laszczak 			      struct cdnsp_port *port,
1113*3d829045SPawel Laszczak 			      __le32 __iomem *addr)
1114*3d829045SPawel Laszczak {
1115*3d829045SPawel Laszczak 	u32 temp, port_offset;
1116*3d829045SPawel Laszczak 
1117*3d829045SPawel Laszczak 	temp = readl(addr);
1118*3d829045SPawel Laszczak 	port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
1119*3d829045SPawel Laszczak 	port->min_rev = CDNSP_EXT_PORT_MINOR(temp);
1120*3d829045SPawel Laszczak 
1121*3d829045SPawel Laszczak 	/* Port offset and count are in the third dword. */
1122*3d829045SPawel Laszczak 	temp = readl(addr + 2);
1123*3d829045SPawel Laszczak 	port_offset = CDNSP_EXT_PORT_OFF(temp);
1124*3d829045SPawel Laszczak 
1125*3d829045SPawel Laszczak 	port->port_num = port_offset;
1126*3d829045SPawel Laszczak 	port->exist = 1;
1127*3d829045SPawel Laszczak }
1128*3d829045SPawel Laszczak 
1129*3d829045SPawel Laszczak /*
1130*3d829045SPawel Laszczak  * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
1131*3d829045SPawel Laszczak  * that tell us which speeds each port supports.
1132*3d829045SPawel Laszczak  */
1133*3d829045SPawel Laszczak static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev, gfp_t flags)
1134*3d829045SPawel Laszczak {
1135*3d829045SPawel Laszczak 	void __iomem *base;
1136*3d829045SPawel Laszczak 	u32 offset;
1137*3d829045SPawel Laszczak 	int i;
1138*3d829045SPawel Laszczak 
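	/* Cache the USB2 and USB3 port configuration register blocks. */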
1139*3d829045SPawel Laszczak 	base = &pdev->cap_regs->hc_capbase;
1140*3d829045SPawel Laszczak 	offset = cdnsp_find_next_ext_cap(base, 0,
1141*3d829045SPawel Laszczak 					 EXT_CAP_CFG_DEV_20PORT_CAP_ID);
1142*3d829045SPawel Laszczak 	pdev->port20_regs = base + offset;
1143*3d829045SPawel Laszczak 
1144*3d829045SPawel Laszczak 	offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
1145*3d829045SPawel Laszczak 	pdev->port3x_regs = base + offset;
1146*3d829045SPawel Laszczak 
1147*3d829045SPawel Laszczak 	offset = 0;
1148*3d829045SPawel Laszczak 	base = &pdev->cap_regs->hc_capbase;
1149*3d829045SPawel Laszczak 
1150*3d829045SPawel Laszczak 	/* The driver expects at most two extended protocol capabilities. */
1151*3d829045SPawel Laszczak 	for (i = 0; i < 2; i++) {
1152*3d829045SPawel Laszczak 		u32 temp;
1153*3d829045SPawel Laszczak 
1154*3d829045SPawel Laszczak 		offset = cdnsp_find_next_ext_cap(base, offset,
1155*3d829045SPawel Laszczak 						 EXT_CAPS_PROTOCOL);
1156*3d829045SPawel Laszczak 		temp = readl(base + offset);
1157*3d829045SPawel Laszczak 
1158*3d829045SPawel Laszczak 		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
1159*3d829045SPawel Laszczak 		    !pdev->usb3_port.port_num)
1160*3d829045SPawel Laszczak 			cdnsp_add_in_port(pdev, &pdev->usb3_port,
1161*3d829045SPawel Laszczak 					  base + offset);
1162*3d829045SPawel Laszczak 
1163*3d829045SPawel Laszczak 		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
1164*3d829045SPawel Laszczak 		    !pdev->usb2_port.port_num)
1165*3d829045SPawel Laszczak 			cdnsp_add_in_port(pdev, &pdev->usb2_port,
1166*3d829045SPawel Laszczak 					  base + offset);
1167*3d829045SPawel Laszczak 	}
1168*3d829045SPawel Laszczak 
1169*3d829045SPawel Laszczak 	if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
1170*3d829045SPawel Laszczak 		dev_err(pdev->dev, "Error: Only one port detected\n");
1171*3d829045SPawel Laszczak 		return -ENODEV;
1172*3d829045SPawel Laszczak 	}
1173*3d829045SPawel Laszczak 
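	/* Port register blocks are NUM_PORT_REGS apart; port numbers are 1-based. */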
1174*3d829045SPawel Laszczak 	pdev->usb2_port.regs = (struct cdnsp_port_regs *)
1175*3d829045SPawel Laszczak 			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
1176*3d829045SPawel Laszczak 				(pdev->usb2_port.port_num - 1));
1177*3d829045SPawel Laszczak 
1178*3d829045SPawel Laszczak 	pdev->usb3_port.regs = (struct cdnsp_port_regs *)
1179*3d829045SPawel Laszczak 			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
1180*3d829045SPawel Laszczak 				(pdev->usb3_port.port_num - 1));
1181*3d829045SPawel Laszczak 
1182*3d829045SPawel Laszczak 	return 0;
1183*3d829045SPawel Laszczak }
1184*3d829045SPawel Laszczak 
1185*3d829045SPawel Laszczak /*
1186*3d829045SPawel Laszczak  * Initialize memory for CDNSP (one-time init).
1187*3d829045SPawel Laszczak  *
1188*3d829045SPawel Laszczak  * Choose the DMA pool page size, configure the device slots, initialize the
1189*3d829045SPawel Laszczak  * device context base address array, set up a command ring segment and create
1190*3d829045SPawel Laszczak  * the event ring (one for now) together with its segment table.
1191*3d829045SPawel Laszczak  */
1192*3d829045SPawel Laszczak int cdnsp_mem_init(struct cdnsp_device *pdev, gfp_t flags)
1193*3d829045SPawel Laszczak {
1194*3d829045SPawel Laszczak 	struct device *dev = pdev->dev;
1195*3d829045SPawel Laszczak 	int ret = -ENOMEM;
1196*3d829045SPawel Laszczak 	unsigned int val;
1197*3d829045SPawel Laszczak 	dma_addr_t dma;
1198*3d829045SPawel Laszczak 	u32 page_size;
1199*3d829045SPawel Laszczak 	u64 val_64;
1200*3d829045SPawel Laszczak 
1201*3d829045SPawel Laszczak 	/*
1202*3d829045SPawel Laszczak 	 * Use 4K pages, since that's common and the minimum the
1203*3d829045SPawel Laszczak 	 * controller supports
1204*3d829045SPawel Laszczak 	 */
1205*3d829045SPawel Laszczak 	page_size = 1 << 12;
1206*3d829045SPawel Laszczak 
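	/* Program the enabled device slots and set the CONFIG_U3E bit. */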
1207*3d829045SPawel Laszczak 	val = readl(&pdev->op_regs->config_reg);
1208*3d829045SPawel Laszczak 	val = (val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS | CONFIG_U3E;
1209*3d829045SPawel Laszczak 	writel(val, &pdev->op_regs->config_reg);
1210*3d829045SPawel Laszczak 
1211*3d829045SPawel Laszczak 	/*
1212*3d829045SPawel Laszczak 	 * The device context base address array must be physically
1213*3d829045SPawel Laszczak 	 * contiguous and 64-byte (cache line) aligned.
1214*3d829045SPawel Laszczak 	 */
1215*3d829045SPawel Laszczak 	pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
1216*3d829045SPawel Laszczak 					 &dma, GFP_KERNEL);
1217*3d829045SPawel Laszczak 	if (!pdev->dcbaa)
1218*3d829045SPawel Laszczak 		goto mem_init_fail;
1219*3d829045SPawel Laszczak 
1220*3d829045SPawel Laszczak 	memset(pdev->dcbaa, 0, sizeof(*pdev->dcbaa));
1221*3d829045SPawel Laszczak 	pdev->dcbaa->dma = dma;
1222*3d829045SPawel Laszczak 
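	/* Point the controller at the device context base address array. */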
1223*3d829045SPawel Laszczak 	cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
1224*3d829045SPawel Laszczak 
1225*3d829045SPawel Laszczak 	/*
1226*3d829045SPawel Laszczak 	 * Initialize the ring segment pool. A ring must be a contiguous
1227*3d829045SPawel Laszczak 	 * structure comprised of TRBs. TRBs need only 16-byte alignment,
1228*3d829045SPawel Laszczak 	 * but the command ring requires 64-byte aligned segments and the
1229*3d829045SPawel Laszczak 	 * use of DMA addresses in the trb_address_map radix tree requires
1230*3d829045SPawel Laszczak 	 * TRB_SEGMENT_SIZE alignment, so the driver uses the largest
1231*3d829045SPawel Laszczak 	 * alignment requirement.
1232*3d829045SPawel Laszczak 	 */
1233*3d829045SPawel Laszczak 	pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
1234*3d829045SPawel Laszczak 					     TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
1235*3d829045SPawel Laszczak 					     page_size);
1236*3d829045SPawel Laszczak 
1237*3d829045SPawel Laszczak 	pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
1238*3d829045SPawel Laszczak 					    CDNSP_CTX_SIZE, 64, page_size);
1239*3d829045SPawel Laszczak 
1240*3d829045SPawel Laszczak 	if (!pdev->segment_pool || !pdev->device_pool)
1241*3d829045SPawel Laszczak 		goto mem_init_fail;
1242*3d829045SPawel Laszczak 
1243*3d829045SPawel Laszczak 	/* Set up the command ring to have one segment for now. */
1244*3d829045SPawel Laszczak 	pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, flags);
1245*3d829045SPawel Laszczak 	if (!pdev->cmd_ring)
1246*3d829045SPawel Laszczak 		goto mem_init_fail;
1247*3d829045SPawel Laszczak 
1248*3d829045SPawel Laszczak 	/* Set the address in the Command Ring Control register */
1249*3d829045SPawel Laszczak 	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
1250*3d829045SPawel Laszczak 	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
1251*3d829045SPawel Laszczak 		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
1252*3d829045SPawel Laszczak 		 pdev->cmd_ring->cycle_state;
1253*3d829045SPawel Laszczak 	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
1254*3d829045SPawel Laszczak 
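	/* Locate the doorbell array using the DBOFF capability register. */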
1255*3d829045SPawel Laszczak 	val = readl(&pdev->cap_regs->db_off);
1256*3d829045SPawel Laszczak 	val &= DBOFF_MASK;
1257*3d829045SPawel Laszczak 	pdev->dba = (void __iomem *)pdev->cap_regs + val;
1258*3d829045SPawel Laszczak 
1259*3d829045SPawel Laszczak 	/* Set ir_set to interrupt register set 0 */
1260*3d829045SPawel Laszczak 	pdev->ir_set = &pdev->run_regs->ir_set[0];
1261*3d829045SPawel Laszczak 
1262*3d829045SPawel Laszczak 	/*
1263*3d829045SPawel Laszczak 	 * Event ring setup: Allocate a normal ring, but also setup
1264*3d829045SPawel Laszczak 	 * the event ring segment table (ERST).
1265*3d829045SPawel Laszczak 	 */
1266*3d829045SPawel Laszczak 	pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
1267*3d829045SPawel Laszczak 					    0, flags);
1268*3d829045SPawel Laszczak 	if (!pdev->event_ring)
1269*3d829045SPawel Laszczak 		goto mem_init_fail;
1270*3d829045SPawel Laszczak 
1271*3d829045SPawel Laszczak 	ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst, flags);
1272*3d829045SPawel Laszczak 	if (ret)
1273*3d829045SPawel Laszczak 		goto mem_init_fail;
1274*3d829045SPawel Laszczak 
1275*3d829045SPawel Laszczak 	/* Set the ERST count to the number of entries in the segment table. */
1276*3d829045SPawel Laszczak 	val = readl(&pdev->ir_set->erst_size);
1277*3d829045SPawel Laszczak 	val &= ERST_SIZE_MASK;
1278*3d829045SPawel Laszczak 	val |= ERST_NUM_SEGS;
1279*3d829045SPawel Laszczak 	writel(val, &pdev->ir_set->erst_size);
1280*3d829045SPawel Laszczak 
1281*3d829045SPawel Laszczak 	/* Set the segment table base address. */
1282*3d829045SPawel Laszczak 	val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
1283*3d829045SPawel Laszczak 	val_64 &= ERST_PTR_MASK;
1284*3d829045SPawel Laszczak 	val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
1285*3d829045SPawel Laszczak 	cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
1286*3d829045SPawel Laszczak 
1287*3d829045SPawel Laszczak 	/* Set the event ring dequeue address. */
1288*3d829045SPawel Laszczak 	cdnsp_set_event_deq(pdev);
1289*3d829045SPawel Laszczak 
1290*3d829045SPawel Laszczak 	ret = cdnsp_setup_port_arrays(pdev, flags);
1291*3d829045SPawel Laszczak 	if (ret)
1292*3d829045SPawel Laszczak 		goto mem_init_fail;
1293*3d829045SPawel Laszczak 
1294*3d829045SPawel Laszczak 	ret = cdnsp_alloc_priv_device(pdev, GFP_ATOMIC);
1295*3d829045SPawel Laszczak 	if (ret) {
1296*3d829045SPawel Laszczak 		dev_err(pdev->dev,
1297*3d829045SPawel Laszczak 			"Could not allocate cdnsp_device data structures\n");
1298*3d829045SPawel Laszczak 		goto mem_init_fail;
1299*3d829045SPawel Laszczak 	}
1300*3d829045SPawel Laszczak 
1301*3d829045SPawel Laszczak 	return 0;
1302*3d829045SPawel Laszczak 
1303*3d829045SPawel Laszczak mem_init_fail:
1304*3d829045SPawel Laszczak 	dev_err(pdev->dev, "Couldn't initialize memory\n");
1305*3d829045SPawel Laszczak 	cdnsp_halt(pdev);
1306*3d829045SPawel Laszczak 	cdnsp_reset(pdev);
1307*3d829045SPawel Laszczak 	cdnsp_mem_cleanup(pdev);
1308*3d829045SPawel Laszczak 
1309*3d829045SPawel Laszczak 	return ret;
1310*3d829045SPawel Laszczak }
1311