xref: /openbmc/linux/drivers/net/wireless/ath/ath11k/ce.c (revision d5c65159f2895379e11ca13f62feabe93278985d)
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"

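/* Host-side attributes for each Copy Engine (CE) pipe: source/destination
 * ring depths, maximum buffer size and the target-to-host receive callback.
 * The index into this table is the CE id.
 */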
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

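/* Post a single receive buffer to a pipe: reserve the next descriptor in the
 * SRNG backing the destination ring, point it at the DMA address of the skb
 * and remember the skb at the current write index.  Called with ce_lock held.
 */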
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

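/* Refill the destination ring of a pipe with fresh receive buffers until
 * rx_buf_needed drops to zero.  Each skb is DMA mapped for the device and
 * handed to ath11k_ce_rx_buf_enqueue_pipe() under ce_lock.
 */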
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);

		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

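/* Pop the next completed receive descriptor from the pipe's status ring,
 * return the skb that was posted at the current sw_index together with the
 * number of bytes the target wrote into it, and advance sw_index.
 */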
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

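/* Receive-side service routine for a pipe: drain all completed receive
 * descriptors, unmap and trim each skb to the reported length, pass them to
 * the pipe's recv_cb and finally repost fresh buffers.  If reposting fails,
 * the rx_replenish_retry timer is armed to try again later.
 */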
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

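/* Reap the next completed transmit descriptor from the pipe's source ring
 * and return the skb that was queued at sw_index.  Returns an ERR_PTR()
 * when there is nothing left to reap.
 */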
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

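/* Send-completion handler: unmap and free every skb whose transmission the
 * Copy Engine has completed on this pipe.
 */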
static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

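/* Configure the HAL SRNG backing one CE ring.  Interrupt thresholds are set
 * only for rings whose CE does not have CE_ATTR_DIS_INTR, and the HAL ring
 * id returned by ath11k_hal_srng_setup() is stored for later lookups.
 */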
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
		if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}
	ce_ring->hal_ring_id = ret;

	return 0;
}

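/* Allocate a CE ring: the descriptor area comes from coherent DMA memory and
 * the per-entry skb pointer array is embedded in the ring structure.  Both
 * the CPU and device descriptor addresses are aligned to CE_DESC_RING_ALIGN.
 * Returns an ERR_PTR() on allocation failure.
 */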
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (!ce_ring)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}

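/* Allocate the rings a pipe needs based on its ce_attr: a source ring when
 * the pipe transmits, and destination plus destination-status rings when it
 * receives.  The send/recv callbacks are wired up here as well.
 */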
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath11k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		/* ath11k_ce_alloc_ring() returns an ERR_PTR() on failure, so
		 * check with IS_ERR() and only store the ring on success.
		 */
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

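/* Service a single CE: handle transmit completions first, then any
 * completed receive buffers.
 */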
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}

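/* Queue an skb for transmission on a CE pipe.  For interrupt-disabled pipes
 * the source ring is first reaped via ath11k_ce_poll_send_completed() once
 * usage exceeds ATH11K_CE_USAGE_THRESHOLD.  The caller must have DMA mapped
 * the skb data and stored the address in ATH11K_SKB_CB(skb)->paddr.
 */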
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completions
	 * when the CE has interrupts disabled and the number of used entries
	 * exceeds the usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

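/* Release all receive buffers still posted on a pipe's destination ring:
 * unmap each skb and free it.  Used when tearing the pipes down.
 */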
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}

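/* Post receive buffers on every pipe that has a destination ring.  A pipe
 * that is already full (-ENOSPC) is skipped; any other failure arms the
 * rx_replenish_retry timer and stops the loop.
 */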
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

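/* Initialize the HAL SRNGs backing every allocated CE ring and reset the
 * software ring state (sw/write indices and the number of rx buffers needed).
 */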
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

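/* Free the coherent DMA descriptor memory and the ring structures of every
 * pipe.  Counterpart of ath11k_ce_alloc_pipes().
 */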
void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}

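/* Allocate every CE pipe described in host_ce_config_wlan and initialize the
 * shared ce_lock.  On failure any partially allocated pipes are freed.
 */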
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < CE_COUNT; i++) {
		attr = &host_ce_config_wlan[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partially successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}

/* On a big endian host the Copy Engine byte swap is enabled. When the Copy
 * Engine byte swaps the data, the host needs to swap it again so that buffer
 * contents are read and written in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(int ce_id)
{
	if (ce_id >= CE_COUNT)
		return -EINVAL;

	return host_ce_config_wlan[ce_id].flags;
}