// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
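	/* DP_RX_HASH_ENABLE occupies the low bit of the routing value, so
	 * the REO destination ring id is shifted up by one to sit above it.
	 */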
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d for peer %pM vdev_id %d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		/* Unwind the rx tid queues set up above instead of leaking
		 * them; tid is one past the end of the loop at this point.
		 */
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		} else if (ring_num == 3) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			return -ENOENT;
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d, ring_num: %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

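	/* Compose the 64-bit MSI target address from the low/high halves
	 * reported by the bus layer, and wrap the group number onto the
	 * available MSI data vectors.
	 */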
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

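	/* The allocation above is padded by HAL_RING_BASE_ALIGN - 1 bytes
	 * so the base address can be aligned up in software; the same
	 * offset is then applied to the DMA address.
	 */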
	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp: %d\n", type);
		return -EINVAL;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring: %d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring: %d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d): %d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, i, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d): %d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   3, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring: %d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring: %d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring: %d\n", ret);
		goto err;
	}

	/* When hash-based routing of rx packets is enabled, 32 entries
	 * mapping the hash values to destination rings are configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

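	/* Walk every link descriptor bank and record each descriptor's
	 * DMA address in the scatter buffers, advancing to the next
	 * scatter buffer once the current one runs out of entries.
	 */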
	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			     HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

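	/* end_offset is the number of bytes actually used in the last
	 * scatter buffer, i.e. the offset of the first unused entry.
	 */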
	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;

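	/* If the total is not already a power of two, round it up to the
	 * next power of two (fls() returns the one-based index of the
	 * highest set bit).
	 */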
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

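	/* The descriptor memory is carved into DMA banks of at most
	 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes; each bank loses up to
	 * HAL_LINK_DESC_ALIGN bytes to alignment, and a smaller final
	 * bank is allowed.
	 */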
	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scattering idle list descriptor: %d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i = 0, j;
	int tot_work_done = 0;

	while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
		if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
			ath11k_dp_tx_completion_handler(ab, i);
		i++;
	}

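	/* Each rx handler below consumes part of the shared NAPI budget;
	 * stop servicing further rings once the budget is exhausted.
	 */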
	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring, unlike the tx ring which is mapped to different ACs */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id: %d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable the address
	 * search index; TCL uses the ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable the
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

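	/* tx_status appears to serve as a per-ring circular buffer of
	 * copied completion descriptors; head/tail are initialized below
	 * to match the completion ring depth.
	 */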
	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX
	 * happened. It updates the HP and stops the timer only when there
	 * were no TX operations during the timeout interval; the timer is
	 * restarted when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}