// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;
	return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

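/* Post a single rx buffer to the destination SRNG of @pipe. The caller
 * must hold ab->ce.ce_lock; the SRNG lock is taken here. Returns 0 on
 * success or -ENOSPC when the ring has no free entry.
 */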
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

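/* Replenish the destination ring of @pipe: allocate, DMA-map and
 * enqueue skbs until pipe->rx_buf_needed drops to zero. Returns 0 on
 * success (or when the pipe has no rx rings), or a negative errno on
 * allocation, mapping or enqueue failure.
 */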
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

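/* Pop the next completed rx buffer from the destination status ring of
 * @pipe. On success *skb and *nbytes describe the received buffer and
 * the ring slot is released; returns -EIO when no completed entry (or
 * only a zero-length one) is available.
 */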
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

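/* Process all completed rx buffers of @pipe: unmap and collect them on
 * a local list, dispatch each to pipe->recv_cb() outside the ring
 * locks, then repost fresh buffers. If the repost fails, the
 * rx_replenish_retry timer is armed to retry later.
 */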
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

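/* Reap the next completed descriptor from the source ring of @pipe and
 * return the skb that was transmitted from that slot. Returns
 * ERR_PTR(-EIO) when no send completion is pending.
 */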
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}

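/* Send-completion handler: unmap and free every skb whose slot on the
 * source ring of @pipe has been consumed by the target.
 */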
static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

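/* Fill in the MSI address and per-CE MSI data of @ring_params so the
 * ring can fire the right interrupt vector. @ring_params is left
 * untouched when no MSI vectors are assigned to the CEs.
 */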
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((u64)addr_hi) << 32);
	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

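/* Program one CE ring (source, destination or destination-status) into
 * the HAL: base address, size and interrupt thresholds according to
 * the ring type, plus the shadow timer for rings that need the shadow
 * register workaround.
 */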
static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}

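/* Allocate the bookkeeping structure and a coherent DMA descriptor
 * area of @nentries descriptors of @desc_sz bytes each, aligned to
 * CE_DESC_RING_ALIGN. @nentries must be a power of two since
 * nentries_mask relies on it.
 */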
static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (!ce_ring)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}

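/* Allocate the rings a pipe needs according to its ce_attr: a source
 * ring for host->target pipes, and a destination plus a
 * destination-status ring for target->host pipes. Ring sizes are
 * rounded up to powers of two.
 */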
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath11k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

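/* Handle send completions and received buffers for one copy engine;
 * typically invoked per CE from the bus interrupt path.
 */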
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}

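/* Enqueue @skb for transmission on @pipe_id. The buffer must already
 * be DMA-mapped by the caller (ATH11K_SKB_CB(skb)->paddr). Returns
 * -ENOBUFS when the source ring is full and -ESHUTDOWN during crash
 * flush.
 */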
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the number of used entries is
	 * more than the defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}

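/* Release every rx buffer still posted on the destination ring of
 * @pipe: unmap the DMA buffers and free the skbs.
 */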
static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

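/* Update the shadow register configuration for every ring of every
 * copy engine that is in use per the host CE configuration.
 */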
static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured first, then
	 * all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CEs which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffers in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

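/* Initialize all CE rings in the HAL and reset their software indices;
 * also fetches the shadow register configuration into the QMI CE
 * config. Rings must have been allocated with ath11k_ce_alloc_pipes()
 * first.
 */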
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
				    &ab->qmi.ce_cfg.shadow_reg_v2_len);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

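/* Allocate the software state and rings for every copy engine listed
 * in the hw_params host CE configuration. On failure all partially
 * allocated pipes are freed again.
 */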
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partially successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* For big endian hosts, copy engine byte_swap is enabled. When the
 * copy engine does the byte_swap, the host must byte swap again to
 * get/put buffer contents in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
		}
	}
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
1003