xref: /openbmc/linux/drivers/net/wireless/ath/ath12k/wmi.c (revision 13525645)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 #include <linux/skbuff.h>
7 #include <linux/ctype.h>
8 #include <net/mac80211.h>
9 #include <net/cfg80211.h>
10 #include <linux/completion.h>
11 #include <linux/if_ether.h>
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/uuid.h>
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include "core.h"
18 #include "debug.h"
19 #include "mac.h"
20 #include "hw.h"
21 #include "peer.h"
22 
23 struct ath12k_wmi_svc_ready_parse {
24 	bool wmi_svc_bitmap_done;
25 };
26 
27 struct ath12k_wmi_dma_ring_caps_parse {
28 	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
29 	u32 n_dma_ring_caps;
30 };
31 
32 struct ath12k_wmi_service_ext_arg {
33 	u32 default_conc_scan_config_bits;
34 	u32 default_fw_config_bits;
35 	struct ath12k_wmi_ppe_threshold_arg ppet;
36 	u32 he_cap_info;
37 	u32 mpdu_density;
38 	u32 max_bssid_rx_filters;
39 	u32 num_hw_modes;
40 	u32 num_phy;
41 };
42 
43 struct ath12k_wmi_svc_rdy_ext_parse {
44 	struct ath12k_wmi_service_ext_arg arg;
45 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
46 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
47 	u32 n_hw_mode_caps;
48 	u32 tot_phy_id;
49 	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
50 	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
51 	u32 n_mac_phy_caps;
52 	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
53 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
54 	u32 n_ext_hal_reg_caps;
55 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
56 	bool hw_mode_done;
57 	bool mac_phy_done;
58 	bool ext_hal_reg_done;
59 	bool mac_phy_chainmask_combo_done;
60 	bool mac_phy_chainmask_cap_done;
61 	bool oem_dma_ring_cap_done;
62 	bool dma_ring_cap_done;
63 };
64 
65 struct ath12k_wmi_svc_rdy_ext2_parse {
66 	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
67 	bool dma_ring_cap_done;
68 };
69 
70 struct ath12k_wmi_rdy_parse {
71 	u32 num_extra_mac_addr;
72 };
73 
74 struct ath12k_wmi_dma_buf_release_arg {
75 	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
76 	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
77 	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
78 	u32 num_buf_entry;
79 	u32 num_meta;
80 	bool buf_entry_done;
81 	bool meta_data_done;
82 };
83 
84 struct ath12k_wmi_tlv_policy {
85 	size_t min_len;
86 };
87 
88 struct wmi_tlv_mgmt_rx_parse {
89 	const struct ath12k_wmi_mgmt_rx_params *fixed;
90 	const u8 *frame_buf;
91 	bool frame_buf_done;
92 };
93 
94 static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
95 	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
96 	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
97 	[WMI_TAG_SERVICE_READY_EVENT] = {
98 		.min_len = sizeof(struct wmi_service_ready_event) },
99 	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
100 		.min_len = sizeof(struct wmi_service_ready_ext_event) },
101 	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
102 		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
103 	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
104 		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
105 	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
106 		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
107 	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
108 		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
109 	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
110 		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
111 	[WMI_TAG_VDEV_STOPPED_EVENT] = {
112 		.min_len = sizeof(struct wmi_vdev_stopped_event) },
113 	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
114 		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
115 	[WMI_TAG_MGMT_RX_HDR] = {
116 		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
117 	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
118 		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
119 	[WMI_TAG_SCAN_EVENT] = {
120 		.min_len = sizeof(struct wmi_scan_event) },
121 	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
122 		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
123 	[WMI_TAG_ROAM_EVENT] = {
124 		.min_len = sizeof(struct wmi_roam_event) },
125 	[WMI_TAG_CHAN_INFO_EVENT] = {
126 		.min_len = sizeof(struct wmi_chan_info_event) },
127 	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
128 		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
129 	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
130 		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
131 	[WMI_TAG_READY_EVENT] = {
132 		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
133 	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
134 		.min_len = sizeof(struct wmi_service_available_event) },
135 	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
136 		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
137 	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
138 		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
139 	[WMI_TAG_HOST_SWFDA_EVENT] = {
140 		.min_len = sizeof(struct wmi_fils_discovery_event) },
141 	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
142 		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
143 	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
144 		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
145 };
146 
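/* Pack a TLV tag and payload length into a single __le32 header using the
 * WMI_TLV_TAG and WMI_TLV_LEN field masks. ath12k_wmi_tlv_cmd_hdr() is the
 * variant used when the supplied length still includes the TLV header
 * itself, hence the TLV_HDR_SIZE subtraction.
 */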
147 static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
148 {
149 	return le32_encode_bits(cmd, WMI_TLV_TAG) |
150 		le32_encode_bits(len, WMI_TLV_LEN);
151 }
152 
153 static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
154 {
155 	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
156 }
157 
158 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
159 			     struct ath12k_wmi_resource_config_arg *config)
160 {
161 	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
162 
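	/* Size the peer and TID pools by hw mode: two radios imply DBS
	 * (dual band simultaneous), three radios DBS_SBS (DBS plus single
	 * band simultaneous).
	 */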
163 	if (ab->num_radios == 2) {
164 		config->num_peers = TARGET_NUM_PEERS(DBS);
165 		config->num_tids = TARGET_NUM_TIDS(DBS);
166 	} else if (ab->num_radios == 3) {
167 		config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
168 		config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
169 	} else {
170 		/* Control should not reach here */
171 		config->num_peers = TARGET_NUM_PEERS(SINGLE);
172 		config->num_tids = TARGET_NUM_TIDS(SINGLE);
173 	}
174 	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
175 	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
176 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
177 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
178 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
179 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
180 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
181 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
182 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
183 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
184 
185 	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
186 		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
187 	else
188 		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
189 
190 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
191 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
192 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
193 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
194 	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
195 	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
196 	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
197 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
198 	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
199 	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
200 	config->rx_skip_defrag_timeout_dup_detection_check =
201 		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
202 	config->vow_config = TARGET_VOW_CONFIG;
203 	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
204 	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
205 	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
206 	config->rx_batchmode = TARGET_RX_BATCHMODE;
207 	/* Indicates the host supports peer map v3 and unmap v2 */
208 	config->peer_map_unmap_version = 0x32;
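	/* The value 0x32 presumably packs the peer map version (3) in the
	 * upper nibble and the unmap version (2) in the lower nibble.
	 */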
209 	config->twt_ap_pdev_count = ab->num_radios;
210 	config->twt_ap_sta_count = 1000;
211 }
212 
213 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
214 			     struct ath12k_wmi_resource_config_arg *config)
215 {
216 	config->num_vdevs = 4;
217 	config->num_peers = 16;
218 	config->num_tids = 32;
219 
220 	config->num_offload_peers = 3;
221 	config->num_offload_reorder_buffs = 3;
222 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
223 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
224 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
225 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
226 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
227 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
228 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
229 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
230 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
231 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
232 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
233 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
234 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
235 	config->num_mcast_groups = 0;
236 	config->num_mcast_table_elems = 0;
237 	config->mcast2ucast_mode = 0;
238 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
239 	config->num_wds_entries = 0;
240 	config->dma_burst_size = 0;
241 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
242 	config->vow_config = TARGET_VOW_CONFIG;
243 	config->gtk_offload_max_vdev = 2;
244 	config->num_msdu_desc = 0x400;
245 	config->beacon_tx_offload_max_vdev = 2;
246 	config->rx_batchmode = TARGET_RX_BATCHMODE;
247 
248 	config->peer_map_unmap_version = 0x1;
249 	config->use_pdev_id = 1;
250 	config->max_frag_entries = 0xa;
251 	config->num_tdls_vdevs = 0x1;
252 	config->num_tdls_conn_table_entries = 8;
253 	config->beacon_tx_offload_max_vdev = 0x2;
254 	config->num_multicast_filter_entries = 0x20;
255 	config->num_wow_filters = 0x16;
256 	config->num_keep_alive_pattern = 0;
257 }
258 
259 #define PRIMAP(_hw_mode_) \
260 	[_hw_mode_] = _hw_mode_##_PRI
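/* e.g. PRIMAP(WMI_HOST_HW_MODE_DBS) expands to
 * [WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI
 */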
261 
262 static const int ath12k_hw_mode_pri_map[] = {
263 	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
264 	PRIMAP(WMI_HOST_HW_MODE_DBS),
265 	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
266 	PRIMAP(WMI_HOST_HW_MODE_SBS),
267 	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
268 	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
269 	/* keep last */
270 	PRIMAP(WMI_HOST_HW_MODE_MAX),
271 };
272 
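/* Walk a WMI TLV stream laid out as [tlv header][payload][tlv header]...,
 * validating each length against the remaining buffer and against the
 * per-tag minimum length policy before handing the payload to @iter.
 */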
273 static int
274 ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
275 		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
276 				const void *ptr, void *data),
277 		    void *data)
278 {
279 	const void *begin = ptr;
280 	const struct wmi_tlv *tlv;
281 	u16 tlv_tag, tlv_len;
282 	int ret;
283 
284 	while (len > 0) {
285 		if (len < sizeof(*tlv)) {
286 			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
287 				   ptr - begin, len, sizeof(*tlv));
288 			return -EINVAL;
289 		}
290 
291 		tlv = ptr;
292 		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
293 		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
294 		ptr += sizeof(*tlv);
295 		len -= sizeof(*tlv);
296 
297 		if (tlv_len > len) {
298 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
299 				   tlv_tag, ptr - begin, len, tlv_len);
300 			return -EINVAL;
301 		}
302 
303 		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
304 		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
305 		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
306 			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
307 				   tlv_tag, ptr - begin, tlv_len,
308 				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
309 			return -EINVAL;
310 		}
311 
312 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
313 		if (ret)
314 			return ret;
315 
316 		ptr += tlv_len;
317 		len -= tlv_len;
318 	}
319 
320 	return 0;
321 }
322 
323 static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
324 				     const void *ptr, void *data)
325 {
326 	const void **tb = data;
327 
328 	if (tag < WMI_TAG_MAX)
329 		tb[tag] = ptr;
330 
331 	return 0;
332 }
333 
334 static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
335 				const void *ptr, size_t len)
336 {
337 	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
338 				   (void *)tb);
339 }
340 
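/* Parse a TLV stream into a table indexed by tag (the last occurrence of a
 * tag wins). On success the caller owns the table and must kfree() it. A
 * typical caller, sketched after the pattern used by the event handlers in
 * this file:
 *
 *	const void **tb;
 *
 *	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
 *	if (IS_ERR(tb))
 *		return PTR_ERR(tb);
 *	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
 *	...
 *	kfree(tb);
 */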
341 static const void **
342 ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
343 			   size_t len, gfp_t gfp)
344 {
345 	const void **tb;
346 	int ret;
347 
348 	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
349 	if (!tb)
350 		return ERR_PTR(-ENOMEM);
351 
352 	ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
353 	if (ret) {
354 		kfree(tb);
355 		return ERR_PTR(ret);
356 	}
357 
358 	return tb;
359 }
360 
361 static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
362 				      u32 cmd_id)
363 {
364 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
365 	struct ath12k_base *ab = wmi->wmi_ab->ab;
366 	struct wmi_cmd_hdr *cmd_hdr;
367 	int ret;
368 
369 	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
370 		return -ENOMEM;
371 
372 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
373 	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
374 
375 	memset(skb_cb, 0, sizeof(*skb_cb));
376 	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
377 
378 	if (ret)
379 		goto err_pull;
380 
381 	return 0;
382 
383 err_pull:
384 	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
385 	return ret;
386 }
387 
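/* Send a WMI command, sleeping on tx_credits_wq until HTC tx credits are
 * available: the wait condition retries ath12k_wmi_cmd_send_nowait() while
 * it returns -EAGAIN, and gives up after WMI_SEND_TIMEOUT_HZ or when a
 * crash flush is in progress.
 */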
388 int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
389 			u32 cmd_id)
390 {
391 	struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab;
392 	int ret = -EOPNOTSUPP;
393 
394 	might_sleep();
395 
396 	wait_event_timeout(wmi_sc->tx_credits_wq, ({
397 		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
398 
399 		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
400 			ret = -ESHUTDOWN;
401 
402 		(ret != -EAGAIN);
403 	}), WMI_SEND_TIMEOUT_HZ);
404 
405 	if (ret == -EAGAIN)
406 		ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
407 
408 	return ret;
409 }
410 
411 static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
412 				     const void *ptr,
413 				     struct ath12k_wmi_service_ext_arg *arg)
414 {
415 	const struct wmi_service_ready_ext_event *ev = ptr;
416 	int i;
417 
418 	if (!ev)
419 		return -EINVAL;
420 
421 	/* Move this to host based bitmap */
422 	arg->default_conc_scan_config_bits =
423 		le32_to_cpu(ev->default_conc_scan_config_bits);
424 	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
425 	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
426 	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
427 	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
428 	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
429 	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
430 
431 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
432 		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
433 			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
434 
435 	return 0;
436 }
437 
438 static int
439 ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
440 				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
441 				      u8 hw_mode_id, u8 phy_id,
442 				      struct ath12k_pdev *pdev)
443 {
444 	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
445 	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
446 	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
447 	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
448 	struct ath12k_band_cap *cap_band;
449 	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
450 	u32 phy_map;
451 	u32 hw_idx, phy_idx = 0;
452 	int i;
453 
454 	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
455 		return -EINVAL;
456 
457 	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
458 		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
459 			break;
460 
461 		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
462 		phy_idx = fls(phy_map);
463 	}
464 
465 	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
466 		return -EINVAL;
467 
468 	phy_idx += phy_id;
469 	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
470 		return -EINVAL;
471 
472 	mac_caps = wmi_mac_phy_caps + phy_idx;
473 
474 	pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
475 	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
476 	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
477 
478 	/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
479 	 * from band to band for a single radio, it is not yet clear how this
480 	 * should be handled.
481 	 */
482 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
483 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
484 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
485 	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
486 		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
487 		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
488 		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
489 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
490 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
491 	} else {
492 		return -EINVAL;
493 	}
494 
495 	/* The tx/rx chainmask reported by fw depends on the actual hw chains used.
496 	 * For example, for 4x4 capable macphys, the first 4 chains can be used for
497 	 * the first mac and the remaining 4 chains for the second mac, or vice versa.
498 	 * In this case, tx/rx chainmask 0xf will be advertised for the first mac and
499 	 * 0xf0 for the second mac, or vice versa. Compute the shift value for the
500 	 * tx/rx chainmask, which will be used to advertise the supported ht/vht rates
501 	 * to mac80211.
502 	 */
503 	pdev_cap->tx_chain_mask_shift =
504 			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
505 	pdev_cap->rx_chain_mask_shift =
506 			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
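	/* e.g. a second mac reporting rx_chain_mask 0xf0 gets a shift of 4
	 * (find_first_bit(0xf0) == 4), so its chains 4..7 map back to
	 * positions 0..3 when the ht/vht rate masks are built.
	 */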
507 
508 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
509 		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
510 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
511 		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
512 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
513 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
514 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
515 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
516 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
517 			cap_band->he_cap_phy_info[i] =
518 				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
519 
520 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
521 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
522 
523 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
524 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
525 				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
526 	}
527 
528 	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
529 		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
530 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
531 		cap_band->max_bw_supported =
532 			le32_to_cpu(mac_caps->max_bw_supported_5g);
533 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
534 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
535 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
536 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
537 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
538 			cap_band->he_cap_phy_info[i] =
539 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
540 
541 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
542 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
543 
544 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
545 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
546 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
547 
548 		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
549 		cap_band->max_bw_supported =
550 			le32_to_cpu(mac_caps->max_bw_supported_5g);
551 		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
552 		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
553 		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
554 		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
555 		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
556 			cap_band->he_cap_phy_info[i] =
557 				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
558 
559 		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
560 		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
561 
562 		for (i = 0; i < WMI_MAX_NUM_SS; i++)
563 			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
564 				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
565 	}
566 
567 	return 0;
568 }
569 
570 static int
571 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
572 				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
573 				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
574 				u8 phy_idx,
575 				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
576 {
577 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
578 
579 	if (!reg_caps || !ext_caps)
580 		return -EINVAL;
581 
582 	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
583 		return -EINVAL;
584 
585 	ext_reg_cap = &ext_caps[phy_idx];
586 
587 	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
588 	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
589 	param->eeprom_reg_domain_ext =
590 		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
591 	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
592 	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
593 	/* check if param->wireless_mode is needed */
594 	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
595 	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
596 	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
597 	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
598 
599 	return 0;
600 }
601 
602 static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
603 					 const void *evt_buf,
604 					 struct ath12k_wmi_target_cap_arg *cap)
605 {
606 	const struct wmi_service_ready_event *ev = evt_buf;
607 
608 	if (!ev) {
609 		ath12k_err(ab, "%s: failed due to NULL param\n",
610 			   __func__);
611 		return -EINVAL;
612 	}
613 
614 	cap->phy_capability = le32_to_cpu(ev->phy_capability);
615 	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
616 	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
617 	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
618 	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
619 	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
620 	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
621 	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
622 	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
623 	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
624 	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
625 	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
626 	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
627 	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
628 	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
629 	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
630 	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
631 
632 	return 0;
633 }
634 
635 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
636  * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
637  * 4-byte word.
638  */
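/* For example, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4: bits b0..b3 of
 * word 0 map to services 0..3, bits b0..b3 of word 1 map to services 4..7,
 * and so on; the upper 28 bits of each word are ignored.
 */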
639 static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
640 					   const u32 *wmi_svc_bm)
641 {
642 	int i, j;
643 
644 	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
645 		do {
646 			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
647 				set_bit(j, wmi->wmi_ab->svc_map);
648 		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
649 	}
650 }
651 
652 static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
653 				    const void *ptr, void *data)
654 {
655 	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
656 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
657 	u16 expect_len;
658 
659 	switch (tag) {
660 	case WMI_TAG_SERVICE_READY_EVENT:
661 		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
662 			return -EINVAL;
663 		break;
664 
665 	case WMI_TAG_ARRAY_UINT32:
666 		if (!svc_ready->wmi_svc_bitmap_done) {
667 			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
668 			if (len < expect_len) {
669 				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
670 					    len, tag);
671 				return -EINVAL;
672 			}
673 
674 			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
675 
676 			svc_ready->wmi_svc_bitmap_done = true;
677 		}
678 		break;
679 	default:
680 		break;
681 	}
682 
683 	return 0;
684 }
685 
686 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
687 {
688 	struct ath12k_wmi_svc_ready_parse svc_ready = { };
689 	int ret;
690 
691 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
692 				  ath12k_wmi_svc_rdy_parse,
693 				  &svc_ready);
694 	if (ret) {
695 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
696 		return ret;
697 	}
698 
699 	return 0;
700 }
701 
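/* Allocate a zeroed WMI command buffer whose length is padded to a 4-byte
 * multiple. WMI_SKB_HEADROOM is reserved in front so that the WMI command
 * header (pushed by ath12k_wmi_cmd_send_nowait()) and the lower-layer HTC
 * header can be prepended without reallocating.
 */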
702 struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len)
703 {
704 	struct sk_buff *skb;
705 	struct ath12k_base *ab = wmi_sc->ab;
706 	u32 round_len = roundup(len, 4);
707 
708 	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
709 	if (!skb)
710 		return NULL;
711 
712 	skb_reserve(skb, WMI_SKB_HEADROOM);
713 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
714 		ath12k_warn(ab, "unaligned WMI skb data\n");
715 
716 	skb_put(skb, round_len);
717 	memset(skb->data, 0, round_len);
718 
719 	return skb;
720 }
721 
722 int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
723 			 struct sk_buff *frame)
724 {
725 	struct ath12k_wmi_pdev *wmi = ar->wmi;
726 	struct wmi_mgmt_send_cmd *cmd;
727 	struct wmi_tlv *frame_tlv;
728 	struct sk_buff *skb;
729 	u32 buf_len;
730 	int ret, len;
731 
732 	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
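	/* Only the first WMI_MGMT_SEND_DOWNLD_LEN bytes of the frame are copied
	 * inline into the command TLV; the full frame remains DMA-mapped at
	 * ATH12K_SKB_CB(frame)->paddr and frame_len carries its complete length.
	 */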
733 
734 	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
735 
736 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
737 	if (!skb)
738 		return -ENOMEM;
739 
740 	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
741 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
742 						 sizeof(*cmd));
743 	cmd->vdev_id = cpu_to_le32(vdev_id);
744 	cmd->desc_id = cpu_to_le32(buf_id);
745 	cmd->chanfreq = 0;
746 	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
747 	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
748 	cmd->frame_len = cpu_to_le32(frame->len);
749 	cmd->buf_len = cpu_to_le32(buf_len);
750 	cmd->tx_params_valid = 0;
751 
752 	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
753 	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
754 
755 	memcpy(frame_tlv->value, frame->data, buf_len);
756 
757 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
758 	if (ret) {
759 		ath12k_warn(ar->ab,
760 			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
761 		dev_kfree_skb(skb);
762 	}
763 
764 	return ret;
765 }
766 
767 int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
768 			   struct ath12k_wmi_vdev_create_arg *args)
769 {
770 	struct ath12k_wmi_pdev *wmi = ar->wmi;
771 	struct wmi_vdev_create_cmd *cmd;
772 	struct sk_buff *skb;
773 	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
774 	struct wmi_tlv *tlv;
775 	int ret, len;
776 	void *ptr;
777 
778 	/* This can be optimized by sending the tx/rx chain configuration
779 	 * only for the supported bands instead of always sending it for
780 	 * both bands.
781 	 */
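	/* Command layout:
	 * [wmi_vdev_create_cmd][ARRAY_STRUCT tlv hdr][2 GHz txrx_streams]
	 * [5 GHz txrx_streams]
	 */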
782 	len = sizeof(*cmd) + TLV_HDR_SIZE +
783 		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
784 
785 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
786 	if (!skb)
787 		return -ENOMEM;
788 
789 	cmd = (struct wmi_vdev_create_cmd *)skb->data;
790 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
791 						 sizeof(*cmd));
792 
793 	cmd->vdev_id = cpu_to_le32(args->if_id);
794 	cmd->vdev_type = cpu_to_le32(args->type);
795 	cmd->vdev_subtype = cpu_to_le32(args->subtype);
796 	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
797 	cmd->pdev_id = cpu_to_le32(args->pdev_id);
798 	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
799 	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
800 
801 	ptr = skb->data + sizeof(*cmd);
802 	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
803 
804 	tlv = ptr;
805 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
806 
807 	ptr += TLV_HDR_SIZE;
808 	txrx_streams = ptr;
809 	len = sizeof(*txrx_streams);
810 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
811 							  len);
812 	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
813 	txrx_streams->supported_tx_streams =
814 				 args->chains[NL80211_BAND_2GHZ].tx;
815 	txrx_streams->supported_rx_streams =
816 				 args->chains[NL80211_BAND_2GHZ].rx;
817 
818 	txrx_streams++;
819 	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
820 							  len);
821 	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
822 	txrx_streams->supported_tx_streams =
823 				 args->chains[NL80211_BAND_5GHZ].tx;
824 	txrx_streams->supported_rx_streams =
825 				 args->chains[NL80211_BAND_5GHZ].rx;
826 
827 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
828 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
829 		   args->if_id, args->type, args->subtype,
830 		   macaddr, args->pdev_id);
831 
832 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
833 	if (ret) {
834 		ath12k_warn(ar->ab,
835 			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
836 		dev_kfree_skb(skb);
837 	}
838 
839 	return ret;
840 }
841 
842 int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
843 {
844 	struct ath12k_wmi_pdev *wmi = ar->wmi;
845 	struct wmi_vdev_delete_cmd *cmd;
846 	struct sk_buff *skb;
847 	int ret;
848 
849 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
850 	if (!skb)
851 		return -ENOMEM;
852 
853 	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
854 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
855 						 sizeof(*cmd));
856 	cmd->vdev_id = cpu_to_le32(vdev_id);
857 
858 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
859 
860 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
861 	if (ret) {
862 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
863 		dev_kfree_skb(skb);
864 	}
865 
866 	return ret;
867 }
868 
869 int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
870 {
871 	struct ath12k_wmi_pdev *wmi = ar->wmi;
872 	struct wmi_vdev_stop_cmd *cmd;
873 	struct sk_buff *skb;
874 	int ret;
875 
876 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
877 	if (!skb)
878 		return -ENOMEM;
879 
880 	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
881 
882 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
883 						 sizeof(*cmd));
884 	cmd->vdev_id = cpu_to_le32(vdev_id);
885 
886 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
887 
888 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
889 	if (ret) {
890 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
891 		dev_kfree_skb(skb);
892 	}
893 
894 	return ret;
895 }
896 
897 int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
898 {
899 	struct ath12k_wmi_pdev *wmi = ar->wmi;
900 	struct wmi_vdev_down_cmd *cmd;
901 	struct sk_buff *skb;
902 	int ret;
903 
904 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
905 	if (!skb)
906 		return -ENOMEM;
907 
908 	cmd = (struct wmi_vdev_down_cmd *)skb->data;
909 
910 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
911 						 sizeof(*cmd));
912 	cmd->vdev_id = cpu_to_le32(vdev_id);
913 
914 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
915 
916 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
917 	if (ret) {
918 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
919 		dev_kfree_skb(skb);
920 	}
921 
922 	return ret;
923 }
924 
925 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
926 				       struct wmi_vdev_start_req_arg *arg)
927 {
928 	memset(chan, 0, sizeof(*chan));
929 
930 	chan->mhz = cpu_to_le32(arg->freq);
931 	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
932 	if (arg->mode == MODE_11AC_VHT80_80)
933 		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
934 	else
935 		chan->band_center_freq2 = 0;
936 
937 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
938 	if (arg->passive)
939 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
940 	if (arg->allow_ibss)
941 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
942 	if (arg->allow_ht)
943 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
944 	if (arg->allow_vht)
945 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
946 	if (arg->allow_he)
947 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
948 	if (arg->ht40plus)
949 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
950 	if (arg->chan_radar)
951 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
952 	if (arg->freq2_radar)
953 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
954 
955 	chan->reg_info_1 = le32_encode_bits(arg->max_power,
956 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
957 		le32_encode_bits(arg->max_reg_power,
958 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
959 
960 	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
961 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
962 		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
963 }
964 
965 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
966 			  bool restart)
967 {
968 	struct ath12k_wmi_pdev *wmi = ar->wmi;
969 	struct wmi_vdev_start_request_cmd *cmd;
970 	struct sk_buff *skb;
971 	struct ath12k_wmi_channel_params *chan;
972 	struct wmi_tlv *tlv;
973 	void *ptr;
974 	int ret, len;
975 
976 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
977 		return -EINVAL;
978 
979 	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
980 
981 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
982 	if (!skb)
983 		return -ENOMEM;
984 
985 	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
986 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
987 						 sizeof(*cmd));
988 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
989 	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
990 	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
991 	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
992 	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
993 	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
994 	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
995 	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
996 	cmd->regdomain = cpu_to_le32(arg->regdomain);
997 	cmd->he_ops = cpu_to_le32(arg->he_ops);
998 
999 	if (!restart) {
1000 		if (arg->ssid) {
1001 			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
1002 			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1003 		}
1004 		if (arg->hidden_ssid)
1005 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
1006 		if (arg->pmf_enabled)
1007 			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
1008 	}
1009 
1010 	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
1011 
1012 	ptr = skb->data + sizeof(*cmd);
1013 	chan = ptr;
1014 
1015 	ath12k_wmi_put_wmi_channel(chan, arg);
1016 
1017 	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
1018 						  sizeof(*chan));
1019 	ptr += sizeof(*chan);
1020 
1021 	tlv = ptr;
1022 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
1023 
1024 	/* Note: This is a nested TLV containing:
1025 	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
1026 	 */
1027 
1028 	ptr += sizeof(*tlv);
1029 
1030 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
1031 		   restart ? "restart" : "start", arg->vdev_id,
1032 		   arg->freq, arg->mode);
1033 
1034 	if (restart)
1035 		ret = ath12k_wmi_cmd_send(wmi, skb,
1036 					  WMI_VDEV_RESTART_REQUEST_CMDID);
1037 	else
1038 		ret = ath12k_wmi_cmd_send(wmi, skb,
1039 					  WMI_VDEV_START_REQUEST_CMDID);
1040 	if (ret) {
1041 		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
1042 			    restart ? "restart" : "start");
1043 		dev_kfree_skb(skb);
1044 	}
1045 
1046 	return ret;
1047 }
1048 
1049 int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
1050 {
1051 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1052 	struct wmi_vdev_up_cmd *cmd;
1053 	struct sk_buff *skb;
1054 	int ret;
1055 
1056 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1057 	if (!skb)
1058 		return -ENOMEM;
1059 
1060 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
1061 
1062 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
1063 						 sizeof(*cmd));
1064 	cmd->vdev_id = cpu_to_le32(vdev_id);
1065 	cmd->vdev_assoc_id = cpu_to_le32(aid);
1066 
1067 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
1068 
1069 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1070 		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1071 		   vdev_id, aid, bssid);
1072 
1073 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1074 	if (ret) {
1075 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1076 		dev_kfree_skb(skb);
1077 	}
1078 
1079 	return ret;
1080 }
1081 
1082 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
1083 				    struct ath12k_wmi_peer_create_arg *arg)
1084 {
1085 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1086 	struct wmi_peer_create_cmd *cmd;
1087 	struct sk_buff *skb;
1088 	int ret;
1089 
1090 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1091 	if (!skb)
1092 		return -ENOMEM;
1093 
1094 	cmd = (struct wmi_peer_create_cmd *)skb->data;
1095 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
1096 						 sizeof(*cmd));
1097 
1098 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
1099 	cmd->peer_type = cpu_to_le32(arg->peer_type);
1100 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1101 
1102 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1103 		   "WMI peer create vdev_id %d peer_addr %pM\n",
1104 		   arg->vdev_id, arg->peer_addr);
1105 
1106 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
1107 	if (ret) {
1108 		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
1109 		dev_kfree_skb(skb);
1110 	}
1111 
1112 	return ret;
1113 }
1114 
1115 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
1116 				    const u8 *peer_addr, u8 vdev_id)
1117 {
1118 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1119 	struct wmi_peer_delete_cmd *cmd;
1120 	struct sk_buff *skb;
1121 	int ret;
1122 
1123 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1124 	if (!skb)
1125 		return -ENOMEM;
1126 
1127 	cmd = (struct wmi_peer_delete_cmd *)skb->data;
1128 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
1129 						 sizeof(*cmd));
1130 
1131 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1132 	cmd->vdev_id = cpu_to_le32(vdev_id);
1133 
1134 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1135 		   "WMI peer delete vdev_id %d peer_addr %pM\n",
1136 		   vdev_id,  peer_addr);
1137 
1138 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
1139 	if (ret) {
1140 		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
1141 		dev_kfree_skb(skb);
1142 	}
1143 
1144 	return ret;
1145 }
1146 
1147 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
1148 				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
1149 {
1150 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1151 	struct wmi_pdev_set_regdomain_cmd *cmd;
1152 	struct sk_buff *skb;
1153 	int ret;
1154 
1155 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1156 	if (!skb)
1157 		return -ENOMEM;
1158 
1159 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1160 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
1161 						 sizeof(*cmd));
1162 
1163 	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
1164 	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
1165 	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
1166 	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
1167 	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
1168 	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
1169 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
1170 
1171 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1172 		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1173 		   arg->current_rd_in_use, arg->current_rd_2g,
1174 		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
1175 
1176 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1177 	if (ret) {
1178 		ath12k_warn(ar->ab,
1179 			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1180 		dev_kfree_skb(skb);
1181 	}
1182 
1183 	return ret;
1184 }
1185 
1186 int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
1187 			      u32 vdev_id, u32 param_id, u32 param_val)
1188 {
1189 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1190 	struct wmi_peer_set_param_cmd *cmd;
1191 	struct sk_buff *skb;
1192 	int ret;
1193 
1194 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1195 	if (!skb)
1196 		return -ENOMEM;
1197 
1198 	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
1199 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
1200 						 sizeof(*cmd));
1201 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1202 	cmd->vdev_id = cpu_to_le32(vdev_id);
1203 	cmd->param_id = cpu_to_le32(param_id);
1204 	cmd->param_value = cpu_to_le32(param_val);
1205 
1206 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1207 		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
1208 		   vdev_id, peer_addr, param_id, param_val);
1209 
1210 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
1211 	if (ret) {
1212 		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
1213 		dev_kfree_skb(skb);
1214 	}
1215 
1216 	return ret;
1217 }
1218 
1219 int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
1220 					u8 peer_addr[ETH_ALEN],
1221 					u32 peer_tid_bitmap,
1222 					u8 vdev_id)
1223 {
1224 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1225 	struct wmi_peer_flush_tids_cmd *cmd;
1226 	struct sk_buff *skb;
1227 	int ret;
1228 
1229 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1230 	if (!skb)
1231 		return -ENOMEM;
1232 
1233 	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
1234 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
1235 						 sizeof(*cmd));
1236 
1237 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1238 	cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
1239 	cmd->vdev_id = cpu_to_le32(vdev_id);
1240 
1241 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1242 		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1243 		   vdev_id, peer_addr, peer_tid_bitmap);
1244 
1245 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
1246 	if (ret) {
1247 		ath12k_warn(ar->ab,
1248 			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1249 		dev_kfree_skb(skb);
1250 	}
1251 
1252 	return ret;
1253 }
1254 
1255 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
1256 					   int vdev_id, const u8 *addr,
1257 					   dma_addr_t paddr, u8 tid,
1258 					   u8 ba_window_size_valid,
1259 					   u32 ba_window_size)
1260 {
1261 	struct wmi_peer_reorder_queue_setup_cmd *cmd;
1262 	struct sk_buff *skb;
1263 	int ret;
1264 
1265 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
1266 	if (!skb)
1267 		return -ENOMEM;
1268 
1269 	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
1270 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
1271 						 sizeof(*cmd));
1272 
1273 	ether_addr_copy(cmd->peer_macaddr.addr, addr);
1274 	cmd->vdev_id = cpu_to_le32(vdev_id);
1275 	cmd->tid = cpu_to_le32(tid);
1276 	cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
1277 	cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
1278 	cmd->queue_no = cpu_to_le32(tid);
1279 	cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
1280 	cmd->ba_window_size = cpu_to_le32(ba_window_size);
1281 
1282 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1283 		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1284 		   addr, vdev_id, tid);
1285 
1286 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
1287 				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
1288 	if (ret) {
1289 		ath12k_warn(ar->ab,
1290 			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1291 		dev_kfree_skb(skb);
1292 	}
1293 
1294 	return ret;
1295 }
1296 
1297 int
1298 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
1299 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
1300 {
1301 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1302 	struct wmi_peer_reorder_queue_remove_cmd *cmd;
1303 	struct sk_buff *skb;
1304 	int ret;
1305 
1306 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1307 	if (!skb)
1308 		return -ENOMEM;
1309 
1310 	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1311 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
1312 						 sizeof(*cmd));
1313 
1314 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
1315 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1316 	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
1317 
1318 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1319 		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
1320 		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
1321 
1322 	ret = ath12k_wmi_cmd_send(wmi, skb,
1323 				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1324 	if (ret) {
1325 		ath12k_warn(ar->ab,
1326 			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1327 		dev_kfree_skb(skb);
1328 	}
1329 
1330 	return ret;
1331 }
1332 
1333 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
1334 			      u32 param_value, u8 pdev_id)
1335 {
1336 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1337 	struct wmi_pdev_set_param_cmd *cmd;
1338 	struct sk_buff *skb;
1339 	int ret;
1340 
1341 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1342 	if (!skb)
1343 		return -ENOMEM;
1344 
1345 	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
1346 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
1347 						 sizeof(*cmd));
1348 	cmd->pdev_id = cpu_to_le32(pdev_id);
1349 	cmd->param_id = cpu_to_le32(param_id);
1350 	cmd->param_value = cpu_to_le32(param_value);
1351 
1352 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1353 		   "WMI pdev set param %d pdev id %d value %d\n",
1354 		   param_id, pdev_id, param_value);
1355 
1356 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
1357 	if (ret) {
1358 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1359 		dev_kfree_skb(skb);
1360 	}
1361 
1362 	return ret;
1363 }
1364 
1365 int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
1366 {
1367 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1368 	struct wmi_pdev_set_ps_mode_cmd *cmd;
1369 	struct sk_buff *skb;
1370 	int ret;
1371 
1372 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1373 	if (!skb)
1374 		return -ENOMEM;
1375 
1376 	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
1377 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
1378 						 sizeof(*cmd));
1379 	cmd->vdev_id = cpu_to_le32(vdev_id);
1380 	cmd->sta_ps_mode = cpu_to_le32(enable);
1381 
1382 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1383 		   "WMI vdev set psmode %d vdev id %d\n",
1384 		   enable, vdev_id);
1385 
1386 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
1387 	if (ret) {
1388 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
1389 		dev_kfree_skb(skb);
1390 	}
1391 
1392 	return ret;
1393 }
1394 
1395 int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
1396 			    u32 pdev_id)
1397 {
1398 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1399 	struct wmi_pdev_suspend_cmd *cmd;
1400 	struct sk_buff *skb;
1401 	int ret;
1402 
1403 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1404 	if (!skb)
1405 		return -ENOMEM;
1406 
1407 	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
1408 
1409 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
1410 						 sizeof(*cmd));
1411 
1412 	cmd->suspend_opt = cpu_to_le32(suspend_opt);
1413 	cmd->pdev_id = cpu_to_le32(pdev_id);
1414 
1415 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1416 		   "WMI pdev suspend pdev_id %d\n", pdev_id);
1417 
1418 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
1419 	if (ret) {
1420 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
1421 		dev_kfree_skb(skb);
1422 	}
1423 
1424 	return ret;
1425 }
1426 
1427 int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
1428 {
1429 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1430 	struct wmi_pdev_resume_cmd *cmd;
1431 	struct sk_buff *skb;
1432 	int ret;
1433 
1434 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1435 	if (!skb)
1436 		return -ENOMEM;
1437 
1438 	cmd = (struct wmi_pdev_resume_cmd *)skb->data;
1439 
1440 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
1441 						 sizeof(*cmd));
1442 	cmd->pdev_id = cpu_to_le32(pdev_id);
1443 
1444 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1445 		   "WMI pdev resume pdev id %d\n", pdev_id);
1446 
1447 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
1448 	if (ret) {
1449 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
1450 		dev_kfree_skb(skb);
1451 	}
1452 
1453 	return ret;
1454 }
1455 
1456 /* TODO: FW support for this cmd is not available yet.
1457  * It can be tested once the command and the corresponding
1458  * event are implemented in FW.
1459  */
1460 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1461 					  enum wmi_bss_chan_info_req_type type)
1462 {
1463 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1464 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1465 	struct sk_buff *skb;
1466 	int ret;
1467 
1468 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1469 	if (!skb)
1470 		return -ENOMEM;
1471 
1472 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1473 
1474 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1475 						 sizeof(*cmd));
1476 	cmd->req_type = cpu_to_le32(type);
1477 
1478 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1479 		   "WMI bss chan info req type %d\n", type);
1480 
1481 	ret = ath12k_wmi_cmd_send(wmi, skb,
1482 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1483 	if (ret) {
1484 		ath12k_warn(ar->ab,
1485 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1486 		dev_kfree_skb(skb);
1487 	}
1488 
1489 	return ret;
1490 }
1491 
1492 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
1493 					struct ath12k_wmi_ap_ps_arg *arg)
1494 {
1495 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1496 	struct wmi_ap_ps_peer_cmd *cmd;
1497 	struct sk_buff *skb;
1498 	int ret;
1499 
1500 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1501 	if (!skb)
1502 		return -ENOMEM;
1503 
1504 	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
1505 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
1506 						 sizeof(*cmd));
1507 
1508 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1509 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1510 	cmd->param = cpu_to_le32(arg->param);
1511 	cmd->value = cpu_to_le32(arg->value);
1512 
1513 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1514 		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1515 		   arg->vdev_id, peer_addr, arg->param, arg->value);
1516 
1517 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
1518 	if (ret) {
1519 		ath12k_warn(ar->ab,
1520 			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1521 		dev_kfree_skb(skb);
1522 	}
1523 
1524 	return ret;
1525 }
1526 
1527 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
1528 				u32 param, u32 param_value)
1529 {
1530 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1531 	struct wmi_sta_powersave_param_cmd *cmd;
1532 	struct sk_buff *skb;
1533 	int ret;
1534 
1535 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1536 	if (!skb)
1537 		return -ENOMEM;
1538 
1539 	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
1540 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
1541 						 sizeof(*cmd));
1542 
1543 	cmd->vdev_id = cpu_to_le32(vdev_id);
1544 	cmd->param = cpu_to_le32(param);
1545 	cmd->value = cpu_to_le32(param_value);
1546 
1547 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1548 		   "WMI set sta ps vdev_id %d param %d value %d\n",
1549 		   vdev_id, param, param_value);
1550 
1551 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
1552 	if (ret) {
1553 		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1554 		dev_kfree_skb(skb);
1555 	}
1556 
1557 	return ret;
1558 }
1559 
1560 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
1561 {
1562 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1563 	struct wmi_force_fw_hang_cmd *cmd;
1564 	struct sk_buff *skb;
1565 	int ret, len;
1566 
1567 	len = sizeof(*cmd);
1568 
1569 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1570 	if (!skb)
1571 		return -ENOMEM;
1572 
1573 	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
1574 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
1575 						 len);
1576 
1577 	cmd->type = cpu_to_le32(type);
1578 	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
1579 
1580 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
1581 
1582 	if (ret) {
1583 		ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
1584 		dev_kfree_skb(skb);
1585 	}
1586 	return ret;
1587 }
1588 
1589 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
1590 				  u32 param_id, u32 param_value)
1591 {
1592 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1593 	struct wmi_vdev_set_param_cmd *cmd;
1594 	struct sk_buff *skb;
1595 	int ret;
1596 
1597 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1598 	if (!skb)
1599 		return -ENOMEM;
1600 
1601 	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
1602 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
1603 						 sizeof(*cmd));
1604 
1605 	cmd->vdev_id = cpu_to_le32(vdev_id);
1606 	cmd->param_id = cpu_to_le32(param_id);
1607 	cmd->param_value = cpu_to_le32(param_value);
1608 
1609 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1610 		   "WMI vdev id 0x%x set param %d value %d\n",
1611 		   vdev_id, param_id, param_value);
1612 
1613 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
1614 	if (ret) {
1615 		ath12k_warn(ar->ab,
1616 			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1617 		dev_kfree_skb(skb);
1618 	}
1619 
1620 	return ret;
1621 }
1622 
1623 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1624 {
1625 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1626 	struct wmi_get_pdev_temperature_cmd *cmd;
1627 	struct sk_buff *skb;
1628 	int ret;
1629 
1630 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1631 	if (!skb)
1632 		return -ENOMEM;
1633 
1634 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1635 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1636 						 sizeof(*cmd));
1637 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1638 
1639 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1640 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1641 
1642 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1643 	if (ret) {
1644 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1645 		dev_kfree_skb(skb);
1646 	}
1647 
1648 	return ret;
1649 }
1650 
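/* Enable or disable beacon offload processing in the firmware for the
 * given vdev via WMI_BCN_OFFLOAD_CTRL_CMDID.
 */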
1651 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
1652 					    u32 vdev_id, u32 bcn_ctrl_op)
1653 {
1654 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1655 	struct wmi_bcn_offload_ctrl_cmd *cmd;
1656 	struct sk_buff *skb;
1657 	int ret;
1658 
1659 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1660 	if (!skb)
1661 		return -ENOMEM;
1662 
1663 	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
1664 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
1665 						 sizeof(*cmd));
1666 
1667 	cmd->vdev_id = cpu_to_le32(vdev_id);
1668 	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
1669 
1670 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1671 		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1672 		   vdev_id, bcn_ctrl_op);
1673 
1674 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
1675 	if (ret) {
1676 		ath12k_warn(ar->ab,
1677 			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1678 		dev_kfree_skb(skb);
1679 	}
1680 
1681 	return ret;
1682 }
1683 
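/* Hand a beacon template to the firmware. The message carries three TLVs:
 * the fixed wmi_bcn_tmpl_cmd, a bcn_prb_info section (caps/erp left zero)
 * and a byte-array TLV with the beacon frame padded to a 4-byte boundary.
 */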
1684 int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
1685 			struct ieee80211_mutable_offsets *offs,
1686 			struct sk_buff *bcn)
1687 {
1688 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1689 	struct wmi_bcn_tmpl_cmd *cmd;
1690 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
1691 	struct wmi_tlv *tlv;
1692 	struct sk_buff *skb;
1693 	void *ptr;
1694 	int ret, len;
1695 	size_t aligned_len = roundup(bcn->len, 4);
1696 
1697 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
1698 
1699 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1700 	if (!skb)
1701 		return -ENOMEM;
1702 
1703 	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
1704 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
1705 						 sizeof(*cmd));
1706 	cmd->vdev_id = cpu_to_le32(vdev_id);
1707 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
1708 	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
1709 	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
1710 	cmd->buf_len = cpu_to_le32(bcn->len);
1711 
1712 	ptr = skb->data + sizeof(*cmd);
1713 
1714 	bcn_prb_info = ptr;
1715 	len = sizeof(*bcn_prb_info);
1716 	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
1717 							  len);
1718 	bcn_prb_info->caps = 0;
1719 	bcn_prb_info->erp = 0;
1720 
1721 	ptr += sizeof(*bcn_prb_info);
1722 
1723 	tlv = ptr;
1724 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
1725 	memcpy(tlv->value, bcn->data, bcn->len);
1726 
1727 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
1728 	if (ret) {
1729 		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
1730 		dev_kfree_skb(skb);
1731 	}
1732 
1733 	return ret;
1734 }
1735 
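/* Install a peer/group key for a vdev. The key material follows the fixed
 * command as a byte-array TLV whose length is rounded up to 4 bytes; the
 * real key length is carried in cmd->key_len.
 */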
1736 int ath12k_wmi_vdev_install_key(struct ath12k *ar,
1737 				struct wmi_vdev_install_key_arg *arg)
1738 {
1739 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1740 	struct wmi_vdev_install_key_cmd *cmd;
1741 	struct wmi_tlv *tlv;
1742 	struct sk_buff *skb;
1743 	int ret, len, key_len_aligned;
1744 
1745 	/* WMI_TAG_ARRAY_BYTE needs to be aligned to 4 bytes; the actual key
1746 	 * length is specified in cmd->key_len.
1747 	 */
1748 	key_len_aligned = roundup(arg->key_len, 4);
1749 
1750 	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
1751 
1752 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1753 	if (!skb)
1754 		return -ENOMEM;
1755 
1756 	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
1757 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
1758 						 sizeof(*cmd));
1759 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1760 	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1761 	cmd->key_idx = cpu_to_le32(arg->key_idx);
1762 	cmd->key_flags = cpu_to_le32(arg->key_flags);
1763 	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
1764 	cmd->key_len = cpu_to_le32(arg->key_len);
1765 	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
1766 	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
1767 
1768 	if (arg->key_rsc_counter)
1769 		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
1770 
1771 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
1772 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
1773 	memcpy(tlv->value, arg->key_data, arg->key_len);
1774 
1775 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1776 		   "WMI vdev install key idx %d cipher %d len %d\n",
1777 		   arg->key_idx, arg->key_cipher, arg->key_len);
1778 
1779 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
1780 	if (ret) {
1781 		ath12k_warn(ar->ab,
1782 			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1783 		dev_kfree_skb(skb);
1784 	}
1785 
1786 	return ret;
1787 }
1788 
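/* Translate host peer association parameters into WMI_PEER_* flag bits.
 * WME related flags are only applied when arg->is_wme_set is true, and
 * authorization is withheld for peers that still need a 4-way handshake
 * unless hardware crypto is disabled.
 */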
1789 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
1790 				       struct ath12k_wmi_peer_assoc_arg *arg,
1791 				       bool hw_crypto_disabled)
1792 {
1793 	cmd->peer_flags = 0;
1794 
1795 	if (arg->is_wme_set) {
1796 		if (arg->qos_flag)
1797 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
1798 		if (arg->apsd_flag)
1799 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
1800 		if (arg->ht_flag)
1801 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
1802 		if (arg->bw_40)
1803 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
1804 		if (arg->bw_80)
1805 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
1806 		if (arg->bw_160)
1807 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
1808 
1809 		/* Typically if STBC is enabled for VHT it should be enabled
1810 		 * for HT as well.
1811 		 */
1812 		if (arg->stbc_flag)
1813 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
1814 
1815 		/* Typically if LDPC is enabled for VHT it should be enabled
1816 		 * for HT as well.
1817 		 */
1818 		if (arg->ldpc_flag)
1819 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
1820 
1821 		if (arg->static_mimops_flag)
1822 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
1823 		if (arg->dynamic_mimops_flag)
1824 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
1825 		if (arg->spatial_mux_flag)
1826 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
1827 		if (arg->vht_flag)
1828 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
1829 		if (arg->he_flag)
1830 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
1831 		if (arg->twt_requester)
1832 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
1833 		if (arg->twt_responder)
1834 			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
1835 	}
1836 
1837 	/* Suppress authorization for all AUTH modes that need 4-way handshake
1838 	 * (during re-association).
1839 	 * Authorization will be done for these modes on key installation.
1840 	 */
1841 	if (arg->auth_flag)
1842 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
1843 	if (arg->need_ptk_4_way) {
1844 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
1845 		if (!hw_crypto_disabled)
1846 			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
1847 	}
1848 	if (arg->need_gtk_2_way)
1849 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
1850 	/* safe mode bypass the 4-way handshake */
1851 	if (arg->safe_mode_enabled)
1852 		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
1853 						 WMI_PEER_NEED_GTK_2_WAY));
1854 
1855 	if (arg->is_pmf_enabled)
1856 		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
1857 
1858 	/* Disable AMSDU for station transmit, if user configures it.
1859 	 * Disable AMSDU for AP transmit to 11n stations, if user configures
1860 	 * it.
1861 	 * if (arg->amsdu_disable) Add after FW support
1862 	 */
1863 
1864 	/* Target asserts if the node is marked HT and all MCS rates are set
1865 	 * to 0. Mark the node as non-HT if all the MCS rates are disabled
1866 	 * through iwpriv.
1867 	 */
1868 	if (arg->peer_ht_rates.num_rates == 0)
1869 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
1870 }
1871 
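/* Build and send WMI_PEER_ASSOC_CMDID. After the fixed command the buffer
 * holds a byte-array TLV of legacy rates, a byte-array TLV of HT rates, a
 * VHT rate-set TLV and an array-of-struct TLV with one HE rate-set entry
 * per peer_he_mcs_count.
 */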
1872 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
1873 				   struct ath12k_wmi_peer_assoc_arg *arg)
1874 {
1875 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1876 	struct wmi_peer_assoc_complete_cmd *cmd;
1877 	struct ath12k_wmi_vht_rate_set_params *mcs;
1878 	struct ath12k_wmi_he_rate_set_params *he_mcs;
1879 	struct sk_buff *skb;
1880 	struct wmi_tlv *tlv;
1881 	void *ptr;
1882 	u32 peer_legacy_rates_align;
1883 	u32 peer_ht_rates_align;
1884 	int i, ret, len;
1885 
1886 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
1887 					  sizeof(u32));
1888 	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
1889 				      sizeof(u32));
1890 
1891 	len = sizeof(*cmd) +
1892 	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
1893 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
1894 	      sizeof(*mcs) + TLV_HDR_SIZE +
1895 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count);
1896 
1897 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
1898 	if (!skb)
1899 		return -ENOMEM;
1900 
1901 	ptr = skb->data;
1902 
1903 	cmd = ptr;
1904 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
1905 						 sizeof(*cmd));
1906 
1907 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1908 
1909 	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
1910 	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
1911 
1912 	ath12k_wmi_copy_peer_flags(cmd, arg,
1913 				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
1914 					    &ar->ab->dev_flags));
1915 
1916 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
1917 
1918 	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
1919 	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
1920 	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
1921 	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
1922 	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
1923 	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
1924 	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
1925 	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
1926 
1927 	/* Update 11ax capabilities */
1928 	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
1929 	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
1930 	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
1931 	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
1932 	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
1933 	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
1934 		cmd->peer_he_cap_phy[i] =
1935 			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
1936 	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
1937 	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
1938 	for (i = 0; i < WMI_MAX_NUM_SS; i++)
1939 		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
1940 			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
1941 
1942 	/* Update peer legacy rate information */
1943 	ptr += sizeof(*cmd);
1944 
1945 	tlv = ptr;
1946 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
1947 
1948 	ptr += TLV_HDR_SIZE;
1949 
1950 	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
1951 	memcpy(ptr, arg->peer_legacy_rates.rates,
1952 	       arg->peer_legacy_rates.num_rates);
1953 
1954 	/* Update peer HT rate information */
1955 	ptr += peer_legacy_rates_align;
1956 
1957 	tlv = ptr;
1958 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
1959 	ptr += TLV_HDR_SIZE;
1960 	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
1961 	memcpy(ptr, arg->peer_ht_rates.rates,
1962 	       arg->peer_ht_rates.num_rates);
1963 
1964 	/* VHT Rates */
1965 	ptr += peer_ht_rates_align;
1966 
1967 	mcs = ptr;
1968 
1969 	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
1970 						 sizeof(*mcs));
1971 
1972 	cmd->peer_nss = cpu_to_le32(arg->peer_nss);
1973 
1974 	/* Update bandwidth-NSS mapping */
1975 	cmd->peer_bw_rxnss_override = 0;
1976 	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
1977 
1978 	if (arg->vht_capable) {
1979 		mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
1980 		mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
1981 		mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
1982 		mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
1983 	}
1984 
1985 	/* HE Rates */
1986 	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
1987 	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
1988 
1989 	ptr += sizeof(*mcs);
1990 
1991 	len = arg->peer_he_mcs_count * sizeof(*he_mcs);
1992 
1993 	tlv = ptr;
1994 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
1995 	ptr += TLV_HDR_SIZE;
1996 
1997 	/* Loop through the HE rate set */
1998 	for (i = 0; i < arg->peer_he_mcs_count; i++) {
1999 		he_mcs = ptr;
2000 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
2001 							    sizeof(*he_mcs));
2002 
2003 		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
2004 		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
2005 		ptr += sizeof(*he_mcs);
2006 	}
2007 
2008 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2009 		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
2010 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
2011 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
2012 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
2013 		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
2014 		   cmd->peer_mpdu_density,
2015 		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
2016 		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
2017 		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
2018 		   cmd->peer_he_cap_phy[2],
2019 		   cmd->peer_bw_rxnss_override);
2020 
2021 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
2022 	if (ret) {
2023 		ath12k_warn(ar->ab,
2024 			    "failed to send WMI_PEER_ASSOC_CMDID\n");
2025 		dev_kfree_skb(skb);
2026 	}
2027 
2028 	return ret;
2029 }
2030 
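/* Preload a scan request with the driver defaults (dwell/rest times,
 * event mask, broadcast BSSID) so callers only override what they need.
 */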
2031 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2032 				struct ath12k_wmi_scan_req_arg *arg)
2033 {
2034 	/* setup commonly used values */
2035 	arg->scan_req_id = 1;
2036 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2037 	arg->dwell_time_active = 50;
2038 	arg->dwell_time_active_2g = 0;
2039 	arg->dwell_time_passive = 150;
2040 	arg->dwell_time_active_6g = 40;
2041 	arg->dwell_time_passive_6g = 30;
2042 	arg->min_rest_time = 50;
2043 	arg->max_rest_time = 500;
2044 	arg->repeat_probe_time = 0;
2045 	arg->probe_spacing_time = 0;
2046 	arg->idle_time = 0;
2047 	arg->max_scan_time = 20000;
2048 	arg->probe_delay = 5;
2049 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2050 				  WMI_SCAN_EVENT_COMPLETED |
2051 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2052 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2053 				  WMI_SCAN_EVENT_DEQUEUED;
2054 	arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
2055 	arg->num_bssid = 1;
2056 
2057 	/* Fill bssid_list[0] with the broadcast address (all 0xff);
2058 	 * otherwise the BSSID and RA in probe requests would be all zeros.
2059 	 */
2060 	eth_broadcast_addr(arg->bssid_list[0].addr);
2061 }
2062 
2063 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
2064 						   struct ath12k_wmi_scan_req_arg *arg)
2065 {
2066 	/* Scan events subscription */
2067 	if (arg->scan_ev_started)
2068 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
2069 	if (arg->scan_ev_completed)
2070 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
2071 	if (arg->scan_ev_bss_chan)
2072 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
2073 	if (arg->scan_ev_foreign_chan)
2074 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
2075 	if (arg->scan_ev_dequeued)
2076 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
2077 	if (arg->scan_ev_preempted)
2078 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
2079 	if (arg->scan_ev_start_failed)
2080 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
2081 	if (arg->scan_ev_restarted)
2082 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
2083 	if (arg->scan_ev_foreign_chn_exit)
2084 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
2085 	if (arg->scan_ev_suspended)
2086 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
2087 	if (arg->scan_ev_resumed)
2088 		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
2089 
2090 	/* Set scan control flags */
2091 	cmd->scan_ctrl_flags = 0;
2092 	if (arg->scan_f_passive)
2093 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
2094 	if (arg->scan_f_strict_passive_pch)
2095 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
2096 	if (arg->scan_f_promisc_mode)
2097 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
2098 	if (arg->scan_f_capture_phy_err)
2099 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
2100 	if (arg->scan_f_half_rate)
2101 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
2102 	if (arg->scan_f_quarter_rate)
2103 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
2104 	if (arg->scan_f_cck_rates)
2105 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
2106 	if (arg->scan_f_ofdm_rates)
2107 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
2108 	if (arg->scan_f_chan_stat_evnt)
2109 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
2110 	if (arg->scan_f_filter_prb_req)
2111 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
2112 	if (arg->scan_f_bcast_probe)
2113 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
2114 	if (arg->scan_f_offchan_mgmt_tx)
2115 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
2116 	if (arg->scan_f_offchan_data_tx)
2117 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
2118 	if (arg->scan_f_force_active_dfs_chn)
2119 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
2120 	if (arg->scan_f_add_tpc_ie_in_probe)
2121 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
2122 	if (arg->scan_f_add_ds_ie_in_probe)
2123 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
2124 	if (arg->scan_f_add_spoofed_mac_in_probe)
2125 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
2126 	if (arg->scan_f_add_rand_seq_in_probe)
2127 		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
2128 	if (arg->scan_f_en_ie_whitelist_in_probe)
2129 		cmd->scan_ctrl_flags |=
2130 			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
2131 
2132 	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
2133 						 WMI_SCAN_DWELL_MODE_MASK);
2134 }
2135 
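/* Build and send WMI_START_SCAN_CMDID. The fixed command is followed, in
 * order, by a uint32 array of channel frequencies, fixed-struct arrays of
 * SSIDs and BSSIDs, a byte array of extra IEs (padded to 4 bytes) and,
 * when requested, fixed-struct arrays of short-SSID and BSSID scan hints.
 */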
2136 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2137 				   struct ath12k_wmi_scan_req_arg *arg)
2138 {
2139 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2140 	struct wmi_start_scan_cmd *cmd;
2141 	struct ath12k_wmi_ssid_params *ssid = NULL;
2142 	struct ath12k_wmi_mac_addr_params *bssid;
2143 	struct sk_buff *skb;
2144 	struct wmi_tlv *tlv;
2145 	void *ptr;
2146 	int i, ret, len;
2147 	u32 *tmp_ptr;
2148 	u32 extraie_len_with_pad = 0;
2149 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2150 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2151 
2152 	len = sizeof(*cmd);
2153 
2154 	len += TLV_HDR_SIZE;
2155 	if (arg->num_chan)
2156 		len += arg->num_chan * sizeof(u32);
2157 
2158 	len += TLV_HDR_SIZE;
2159 	if (arg->num_ssids)
2160 		len += arg->num_ssids * sizeof(*ssid);
2161 
2162 	len += TLV_HDR_SIZE;
2163 	if (arg->num_bssid)
2164 		len += sizeof(*bssid) * arg->num_bssid;
2165 
2166 	len += TLV_HDR_SIZE;
2167 	if (arg->extraie.len)
2168 		extraie_len_with_pad =
2169 			roundup(arg->extraie.len, sizeof(u32));
2170 	len += extraie_len_with_pad;
2171 
2172 	if (arg->num_hint_bssid)
2173 		len += TLV_HDR_SIZE +
2174 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2175 
2176 	if (arg->num_hint_s_ssid)
2177 		len += TLV_HDR_SIZE +
2178 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2179 
2180 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2181 	if (!skb)
2182 		return -ENOMEM;
2183 
2184 	ptr = skb->data;
2185 
2186 	cmd = ptr;
2187 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2188 						 sizeof(*cmd));
2189 
2190 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2191 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2192 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2193 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
2194 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2195 
2196 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2197 
2198 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2199 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2200 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2201 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2202 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2203 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2204 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2205 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2206 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2207 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2208 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2209 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2210 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2211 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2212 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2213 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2214 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2215 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2216 
2217 	ptr += sizeof(*cmd);
2218 
2219 	len = arg->num_chan * sizeof(u32);
2220 
2221 	tlv = ptr;
2222 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2223 	ptr += TLV_HDR_SIZE;
2224 	tmp_ptr = (u32 *)ptr;
2225 
2226 	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * sizeof(u32));
2227 
2228 	ptr += len;
2229 
2230 	len = arg->num_ssids * sizeof(*ssid);
2231 	tlv = ptr;
2232 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2233 
2234 	ptr += TLV_HDR_SIZE;
2235 
2236 	if (arg->num_ssids) {
2237 		ssid = ptr;
2238 		for (i = 0; i < arg->num_ssids; ++i) {
2239 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2240 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2241 			       arg->ssid[i].ssid_len);
2242 			ssid++;
2243 		}
2244 	}
2245 
2246 	ptr += (arg->num_ssids * sizeof(*ssid));
2247 	len = arg->num_bssid * sizeof(*bssid);
2248 	tlv = ptr;
2249 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2250 
2251 	ptr += TLV_HDR_SIZE;
2252 	bssid = ptr;
2253 
2254 	if (arg->num_bssid) {
2255 		for (i = 0; i < arg->num_bssid; ++i) {
2256 			ether_addr_copy(bssid->addr,
2257 					arg->bssid_list[i].addr);
2258 			bssid++;
2259 		}
2260 	}
2261 
2262 	ptr += arg->num_bssid * sizeof(*bssid);
2263 
2264 	len = extraie_len_with_pad;
2265 	tlv = ptr;
2266 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2267 	ptr += TLV_HDR_SIZE;
2268 
2269 	if (arg->extraie.len)
2270 		memcpy(ptr, arg->extraie.ptr,
2271 		       arg->extraie.len);
2272 
2273 	ptr += extraie_len_with_pad;
2274 
2275 	if (arg->num_hint_s_ssid) {
2276 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2277 		tlv = ptr;
2278 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2279 		ptr += TLV_HDR_SIZE;
2280 		s_ssid = ptr;
2281 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2282 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2283 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2284 			s_ssid++;
2285 		}
2286 		ptr += len;
2287 	}
2288 
2289 	if (arg->num_hint_bssid) {
2290 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2291 		tlv = ptr;
2292 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2293 		ptr += TLV_HDR_SIZE;
2294 		hint_bssid = ptr;
2295 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2296 			hint_bssid->freq_flags =
2297 				arg->hint_bssid[i].freq_flags;
2298 			ether_addr_copy(&hint_bssid->bssid.addr[0],
2299 					&arg->hint_bssid[i].bssid.addr[0]);
2300 			hint_bssid++;
2301 		}
2302 	}
2303 
2304 	ret = ath12k_wmi_cmd_send(wmi, skb,
2305 				  WMI_START_SCAN_CMDID);
2306 	if (ret) {
2307 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2308 		dev_kfree_skb(skb);
2309 	}
2310 
2311 	return ret;
2312 }
2313 
2314 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2315 				  struct ath12k_wmi_scan_cancel_arg *arg)
2316 {
2317 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2318 	struct wmi_stop_scan_cmd *cmd;
2319 	struct sk_buff *skb;
2320 	int ret;
2321 
2322 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2323 	if (!skb)
2324 		return -ENOMEM;
2325 
2326 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2327 
2328 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2329 						 sizeof(*cmd));
2330 
2331 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2332 	cmd->requestor = cpu_to_le32(arg->requester);
2333 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2334 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2335 	/* stop the scan with the corresponding scan_id */
2336 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2337 		/* Cancelling all scans */
2338 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2339 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2340 		/* Cancelling VAP scans */
2341 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
2342 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
2343 		/* Cancelling specific scan */
2344 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ONE);
2345 	} else {
2346 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d\n",
2347 			    arg->req_type);
2348 		dev_kfree_skb(skb);
2349 		return -EINVAL;
2350 	}
2351 
2352 	ret = ath12k_wmi_cmd_send(wmi, skb,
2353 				  WMI_STOP_SCAN_CMDID);
2354 	if (ret) {
2355 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
2356 		dev_kfree_skb(skb);
2357 	}
2358 
2359 	return ret;
2360 }
2361 
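/* Send the channel list to the firmware. Lists that do not fit into a
 * single WMI message are split across several WMI_SCAN_CHAN_LIST_CMDID
 * messages; every message after the first sets
 * WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG so the firmware appends instead
 * of replacing the list.
 */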
2362 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
2363 				       struct ath12k_wmi_scan_chan_list_arg *arg)
2364 {
2365 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2366 	struct wmi_scan_chan_list_cmd *cmd;
2367 	struct sk_buff *skb;
2368 	struct ath12k_wmi_channel_params *chan_info;
2369 	struct ath12k_wmi_channel_arg *channel_arg;
2370 	struct wmi_tlv *tlv;
2371 	void *ptr;
2372 	int i, ret, len;
2373 	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
2374 	__le32 *reg1, *reg2;
2375 
2376 	channel_arg = &arg->channel[0];
2377 	while (arg->nallchans) {
2378 		len = sizeof(*cmd) + TLV_HDR_SIZE;
2379 		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
2380 			sizeof(*chan_info);
2381 
2382 		num_send_chans = min(arg->nallchans, max_chan_limit);
2383 
2384 		arg->nallchans -= num_send_chans;
2385 		len += sizeof(*chan_info) * num_send_chans;
2386 
2387 		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2388 		if (!skb)
2389 			return -ENOMEM;
2390 
2391 		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
2392 		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
2393 							 sizeof(*cmd));
2394 		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2395 		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
2396 		if (num_sends)
2397 			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
2398 
2399 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2400 			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
2401 			   num_send_chans, len, cmd->pdev_id, num_sends);
2402 
2403 		ptr = skb->data + sizeof(*cmd);
2404 
2405 		len = sizeof(*chan_info) * num_send_chans;
2406 		tlv = ptr;
2407 		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
2408 						     len);
2409 		ptr += TLV_HDR_SIZE;
2410 
2411 		for (i = 0; i < num_send_chans; ++i) {
2412 			chan_info = ptr;
2413 			memset(chan_info, 0, sizeof(*chan_info));
2414 			len = sizeof(*chan_info);
2415 			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
2416 								       len);
2417 
2418 			reg1 = &chan_info->reg_info_1;
2419 			reg2 = &chan_info->reg_info_2;
2420 			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
2421 			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
2422 			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
2423 
2424 			if (channel_arg->is_chan_passive)
2425 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
2426 			if (channel_arg->allow_he)
2427 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
2428 			else if (channel_arg->allow_vht)
2429 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
2430 			else if (channel_arg->allow_ht)
2431 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
2432 			if (channel_arg->half_rate)
2433 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
2434 			if (channel_arg->quarter_rate)
2435 				chan_info->info |=
2436 					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
2437 
2438 			if (channel_arg->psc_channel)
2439 				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
2440 
2441 			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
2442 							    WMI_CHAN_INFO_MODE);
2443 			*reg1 |= le32_encode_bits(channel_arg->minpower,
2444 						  WMI_CHAN_REG_INFO1_MIN_PWR);
2445 			*reg1 |= le32_encode_bits(channel_arg->maxpower,
2446 						  WMI_CHAN_REG_INFO1_MAX_PWR);
2447 			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
2448 						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
2449 			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
2450 						  WMI_CHAN_REG_INFO1_REG_CLS);
2451 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
2452 						  WMI_CHAN_REG_INFO2_ANT_MAX);
2453 
2454 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2455 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
2456 				   i, chan_info->mhz, chan_info->info);
2457 
2458 			ptr += sizeof(*chan_info);
2459 
2460 			channel_arg++;
2461 		}
2462 
2463 		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
2464 		if (ret) {
2465 			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
2466 			dev_kfree_skb(skb);
2467 			return ret;
2468 		}
2469 
2470 		num_sends++;
2471 	}
2472 
2473 	return 0;
2474 }
2475 
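/* Program per-AC WMM parameters (AIFS, CWmin/CWmax, TXOP, ACM, no-ack)
 * for a vdev via WMI_VDEV_SET_WMM_PARAMS_CMDID.
 */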
2476 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
2477 				   struct wmi_wmm_params_all_arg *param)
2478 {
2479 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2480 	struct wmi_vdev_set_wmm_params_cmd *cmd;
2481 	struct wmi_wmm_params *wmm_param;
2482 	struct wmi_wmm_params_arg *wmi_wmm_arg;
2483 	struct sk_buff *skb;
2484 	int ret, ac;
2485 
2486 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2487 	if (!skb)
2488 		return -ENOMEM;
2489 
2490 	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
2491 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2492 						 sizeof(*cmd));
2493 
2494 	cmd->vdev_id = cpu_to_le32(vdev_id);
2495 	cmd->wmm_param_type = 0;
2496 
2497 	for (ac = 0; ac < WME_NUM_AC; ac++) {
2498 		switch (ac) {
2499 		case WME_AC_BE:
2500 			wmi_wmm_arg = &param->ac_be;
2501 			break;
2502 		case WME_AC_BK:
2503 			wmi_wmm_arg = &param->ac_bk;
2504 			break;
2505 		case WME_AC_VI:
2506 			wmi_wmm_arg = &param->ac_vi;
2507 			break;
2508 		case WME_AC_VO:
2509 			wmi_wmm_arg = &param->ac_vo;
2510 			break;
2511 		}
2512 
2513 		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
2514 		wmm_param->tlv_header =
2515 			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
2516 					       sizeof(*wmm_param));
2517 
2518 		wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
2519 		wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
2520 		wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
2521 		wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
2522 		wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
2523 		wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
2524 
2525 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2526 			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
2527 			   ac, wmm_param->aifs, wmm_param->cwmin,
2528 			   wmm_param->cwmax, wmm_param->txoplimit,
2529 			   wmm_param->acm, wmm_param->no_ack);
2530 	}
2531 	ret = ath12k_wmi_cmd_send(wmi, skb,
2532 				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
2533 	if (ret) {
2534 		ath12k_warn(ar->ab,
2535 			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID\n");
2536 		dev_kfree_skb(skb);
2537 	}
2538 
2539 	return ret;
2540 }
2541 
2542 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
2543 						  u32 pdev_id)
2544 {
2545 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2546 	struct wmi_dfs_phyerr_offload_cmd *cmd;
2547 	struct sk_buff *skb;
2548 	int ret;
2549 
2550 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2551 	if (!skb)
2552 		return -ENOMEM;
2553 
2554 	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
2555 	cmd->tlv_header =
2556 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
2557 				       sizeof(*cmd));
2558 
2559 	cmd->pdev_id = cpu_to_le32(pdev_id);
2560 
2561 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2562 		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
2563 
2564 	ret = ath12k_wmi_cmd_send(wmi, skb,
2565 				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
2566 	if (ret) {
2567 		ath12k_warn(ar->ab,
2568 			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
2569 		dev_kfree_skb(skb);
2570 	}
2571 
2572 	return ret;
2573 }
2574 
2575 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2576 			  u32 tid, u32 initiator, u32 reason)
2577 {
2578 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2579 	struct wmi_delba_send_cmd *cmd;
2580 	struct sk_buff *skb;
2581 	int ret;
2582 
2583 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2584 	if (!skb)
2585 		return -ENOMEM;
2586 
2587 	cmd = (struct wmi_delba_send_cmd *)skb->data;
2588 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
2589 						 sizeof(*cmd));
2590 	cmd->vdev_id = cpu_to_le32(vdev_id);
2591 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2592 	cmd->tid = cpu_to_le32(tid);
2593 	cmd->initiator = cpu_to_le32(initiator);
2594 	cmd->reasoncode = cpu_to_le32(reason);
2595 
2596 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2597 		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
2598 		   vdev_id, mac, tid, initiator, reason);
2599 
2600 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
2601 
2602 	if (ret) {
2603 		ath12k_warn(ar->ab,
2604 			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
2605 		dev_kfree_skb(skb);
2606 	}
2607 
2608 	return ret;
2609 }
2610 
2611 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2612 			      u32 tid, u32 status)
2613 {
2614 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2615 	struct wmi_addba_setresponse_cmd *cmd;
2616 	struct sk_buff *skb;
2617 	int ret;
2618 
2619 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2620 	if (!skb)
2621 		return -ENOMEM;
2622 
2623 	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
2624 	cmd->tlv_header =
2625 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
2626 				       sizeof(*cmd));
2627 	cmd->vdev_id = cpu_to_le32(vdev_id);
2628 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2629 	cmd->tid = cpu_to_le32(tid);
2630 	cmd->statuscode = cpu_to_le32(status);
2631 
2632 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2633 		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
2634 		   vdev_id, mac, tid, status);
2635 
2636 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
2637 
2638 	if (ret) {
2639 		ath12k_warn(ar->ab,
2640 			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
2641 		dev_kfree_skb(skb);
2642 	}
2643 
2644 	return ret;
2645 }
2646 
2647 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
2648 			  u32 tid, u32 buf_size)
2649 {
2650 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2651 	struct wmi_addba_send_cmd *cmd;
2652 	struct sk_buff *skb;
2653 	int ret;
2654 
2655 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2656 	if (!skb)
2657 		return -ENOMEM;
2658 
2659 	cmd = (struct wmi_addba_send_cmd *)skb->data;
2660 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
2661 						 sizeof(*cmd));
2662 	cmd->vdev_id = cpu_to_le32(vdev_id);
2663 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2664 	cmd->tid = cpu_to_le32(tid);
2665 	cmd->buffersize = cpu_to_le32(buf_size);
2666 
2667 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2668 		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
2669 		   vdev_id, mac, tid, buf_size);
2670 
2671 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
2672 
2673 	if (ret) {
2674 		ath12k_warn(ar->ab,
2675 			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
2676 		dev_kfree_skb(skb);
2677 	}
2678 
2679 	return ret;
2680 }
2681 
2682 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
2683 {
2684 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2685 	struct wmi_addba_clear_resp_cmd *cmd;
2686 	struct sk_buff *skb;
2687 	int ret;
2688 
2689 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2690 	if (!skb)
2691 		return -ENOMEM;
2692 
2693 	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
2694 	cmd->tlv_header =
2695 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
2696 				       sizeof(*cmd));
2697 	cmd->vdev_id = cpu_to_le32(vdev_id);
2698 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2699 
2700 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2701 		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
2702 		   vdev_id, mac);
2703 
2704 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
2705 
2706 	if (ret) {
2707 		ath12k_warn(ar->ab,
2708 			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
2709 		dev_kfree_skb(skb);
2710 	}
2711 
2712 	return ret;
2713 }
2714 
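/* Set the initial regulatory country for the pdev; depending on
 * arg->flags the country is passed as an alpha2 string, a country code or
 * a regdomain id.
 */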
2715 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
2716 				     struct ath12k_wmi_init_country_arg *arg)
2717 {
2718 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2719 	struct wmi_init_country_cmd *cmd;
2720 	struct sk_buff *skb;
2721 	int ret;
2722 
2723 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2724 	if (!skb)
2725 		return -ENOMEM;
2726 
2727 	cmd = (struct wmi_init_country_cmd *)skb->data;
2728 	cmd->tlv_header =
2729 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
2730 				       sizeof(*cmd));
2731 
2732 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
2733 
2734 	switch (arg->flags) {
2735 	case ALPHA_IS_SET:
2736 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_ALPHA);
2737 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
2738 		break;
2739 	case CC_IS_SET:
2740 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
2741 		cmd->cc_info.country_code =
2742 			cpu_to_le32(arg->cc_info.country_code);
2743 		break;
2744 	case REGDMN_IS_SET:
2745 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
2746 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
2747 		break;
2748 	default:
2749 		ret = -EINVAL;
2750 		goto out;
2751 	}
2752 
2753 	ret = ath12k_wmi_cmd_send(wmi, skb,
2754 				  WMI_SET_INIT_COUNTRY_CMDID);
2755 
2756 out:
2757 	if (ret) {
2758 		ath12k_warn(ar->ab,
2759 			    "failed to send WMI_SET_INIT_COUNTRY cmd: %d\n",
2760 			    ret);
2761 		dev_kfree_skb(skb);
2762 	}
2763 
2764 	return ret;
2765 }
2766 
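/* Enable TWT (target wake time) on the pdev using the driver default
 * ATH12K_TWT_DEF_* tuning values; MBSSID support is not requested yet.
 */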
2767 int
2768 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
2769 {
2770 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2771 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2772 	struct wmi_twt_enable_params_cmd *cmd;
2773 	struct sk_buff *skb;
2774 	int ret, len;
2775 
2776 	len = sizeof(*cmd);
2777 
2778 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2779 	if (!skb)
2780 		return -ENOMEM;
2781 
2782 	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
2783 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
2784 						 len);
2785 	cmd->pdev_id = cpu_to_le32(pdev_id);
2786 	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
2787 	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
2788 	cmd->congestion_thresh_setup =
2789 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
2790 	cmd->congestion_thresh_teardown =
2791 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
2792 	cmd->congestion_thresh_critical =
2793 		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
2794 	cmd->interference_thresh_teardown =
2795 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
2796 	cmd->interference_thresh_setup =
2797 		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
2798 	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
2799 	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
2800 	cmd->no_of_bcast_mcast_slots =
2801 		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
2802 	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
2803 	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
2804 	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
2805 	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
2806 	cmd->remove_sta_slot_interval =
2807 		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
2808 	/* TODO add MBSSID support */
2809 	cmd->mbss_support = 0;
2810 
2811 	ret = ath12k_wmi_cmd_send(wmi, skb,
2812 				  WMI_TWT_ENABLE_CMDID);
2813 	if (ret) {
2814 		ath12k_warn(ab, "failed to send WMI_TWT_ENABLE_CMDID\n");
2815 		dev_kfree_skb(skb);
2816 	}
2817 	return ret;
2818 }
2819 
2820 int
2821 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
2822 {
2823 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2824 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2825 	struct wmi_twt_disable_params_cmd *cmd;
2826 	struct sk_buff *skb;
2827 	int ret, len;
2828 
2829 	len = sizeof(*cmd);
2830 
2831 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2832 	if (!skb)
2833 		return -ENOMEM;
2834 
2835 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
2836 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
2837 						 len);
2838 	cmd->pdev_id = cpu_to_le32(pdev_id);
2839 
2840 	ret = ath12k_wmi_cmd_send(wmi, skb,
2841 				  WMI_TWT_DISABLE_CMDID);
2842 	if (ret) {
2843 		ath12k_warn(ab, "failed to send WMI_TWT_DISABLE_CMDID\n");
2844 		dev_kfree_skb(skb);
2845 	}
2846 	return ret;
2847 }
2848 
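/* Program OBSS PD spatial reuse thresholds for a vdev from the
 * ieee80211_he_obss_pd settings supplied by mac80211.
 */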
2849 int
2850 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
2851 			     struct ieee80211_he_obss_pd *he_obss_pd)
2852 {
2853 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2854 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2855 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
2856 	struct sk_buff *skb;
2857 	int ret, len;
2858 
2859 	len = sizeof(*cmd);
2860 
2861 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2862 	if (!skb)
2863 		return -ENOMEM;
2864 
2865 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
2866 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
2867 						 len);
2868 	cmd->vdev_id = cpu_to_le32(vdev_id);
2869 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
2870 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
2871 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
2872 
2873 	ret = ath12k_wmi_cmd_send(wmi, skb,
2874 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
2875 	if (ret) {
2876 		ath12k_warn(ab,
2877 			    "failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID\n");
2878 		dev_kfree_skb(skb);
2879 	}
2880 	return ret;
2881 }
2882 
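/* Enable or disable BSS color collision detection for a vdev with the
 * given current bss_color and detection period.
 */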
2883 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
2884 				  u8 bss_color, u32 period,
2885 				  bool enable)
2886 {
2887 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2888 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2889 	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
2890 	struct sk_buff *skb;
2891 	int ret, len;
2892 
2893 	len = sizeof(*cmd);
2894 
2895 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2896 	if (!skb)
2897 		return -ENOMEM;
2898 
2899 	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
2900 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
2901 						 len);
2902 	cmd->vdev_id = cpu_to_le32(vdev_id);
2903 	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
2904 		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
2905 	cmd->current_bss_color = cpu_to_le32(bss_color);
2906 	cmd->detection_period_ms = cpu_to_le32(period);
2907 	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
2908 	cmd->free_slot_expiry_time_ms = 0;
2909 	cmd->flags = 0;
2910 
2911 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2912 		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
2913 		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
2914 		   cmd->detection_period_ms, cmd->scan_period_ms);
2915 
2916 	ret = ath12k_wmi_cmd_send(wmi, skb,
2917 				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
2918 	if (ret) {
2919 		ath12k_warn(ab, "failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID\n");
2920 		dev_kfree_skb(skb);
2921 	}
2922 	return ret;
2923 }
2924 
2925 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
2926 						bool enable)
2927 {
2928 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2929 	struct ath12k_base *ab = wmi->wmi_ab->ab;
2930 	struct wmi_bss_color_change_enable_params_cmd *cmd;
2931 	struct sk_buff *skb;
2932 	int ret, len;
2933 
2934 	len = sizeof(*cmd);
2935 
2936 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2937 	if (!skb)
2938 		return -ENOMEM;
2939 
2940 	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
2941 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
2942 						 len);
2943 	cmd->vdev_id = cpu_to_le32(vdev_id);
2944 	cmd->enable = enable ? cpu_to_le32(1) : 0;
2945 
2946 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2947 		   "wmi_send_bss_color_change_enable id %d enable %d\n",
2948 		   cmd->vdev_id, cmd->enable);
2949 
2950 	ret = ath12k_wmi_cmd_send(wmi, skb,
2951 				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
2952 	if (ret) {
2953 		ath12k_warn(ab, "failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID\n");
2954 		dev_kfree_skb(skb);
2955 	}
2956 	return ret;
2957 }
2958 
2959 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
2960 				   struct sk_buff *tmpl)
2961 {
2962 	struct wmi_tlv *tlv;
2963 	struct sk_buff *skb;
2964 	void *ptr;
2965 	int ret, len;
2966 	size_t aligned_len;
2967 	struct wmi_fils_discovery_tmpl_cmd *cmd;
2968 
2969 	aligned_len = roundup(tmpl->len, 4);
2970 	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
2971 
2972 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
2973 		   "WMI vdev %i set FILS discovery template\n", vdev_id);
2974 
2975 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
2976 	if (!skb)
2977 		return -ENOMEM;
2978 
2979 	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
2980 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
2981 						 sizeof(*cmd));
2982 	cmd->vdev_id = cpu_to_le32(vdev_id);
2983 	cmd->buf_len = cpu_to_le32(tmpl->len);
2984 	ptr = skb->data + sizeof(*cmd);
2985 
2986 	tlv = ptr;
2987 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
2988 	memcpy(tlv->value, tmpl->data, tmpl->len);
2989 
2990 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
2991 	if (ret) {
2992 		ath12k_warn(ar->ab,
2993 			    "WMI vdev %i failed to send FILS discovery template command\n",
2994 			    vdev_id);
2995 		dev_kfree_skb(skb);
2996 	}
2997 	return ret;
2998 }
2999 
3000 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
3001 			       struct sk_buff *tmpl)
3002 {
3003 	struct wmi_probe_tmpl_cmd *cmd;
3004 	struct ath12k_wmi_bcn_prb_info_params *probe_info;
3005 	struct wmi_tlv *tlv;
3006 	struct sk_buff *skb;
3007 	void *ptr;
3008 	int ret, len;
3009 	size_t aligned_len = roundup(tmpl->len, 4);
3010 
3011 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3012 		   "WMI vdev %i set probe response template\n", vdev_id);
3013 
3014 	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
3015 
3016 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3017 	if (!skb)
3018 		return -ENOMEM;
3019 
3020 	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
3021 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
3022 						 sizeof(*cmd));
3023 	cmd->vdev_id = cpu_to_le32(vdev_id);
3024 	cmd->buf_len = cpu_to_le32(tmpl->len);
3025 
3026 	ptr = skb->data + sizeof(*cmd);
3027 
3028 	probe_info = ptr;
3029 	len = sizeof(*probe_info);
3030 	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
3031 							len);
3032 	probe_info->caps = 0;
3033 	probe_info->erp = 0;
3034 
3035 	ptr += sizeof(*probe_info);
3036 
3037 	tlv = ptr;
3038 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
3039 	memcpy(tlv->value, tmpl->data, tmpl->len);
3040 
3041 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
3042 	if (ret) {
3043 		ath12k_warn(ar->ab,
3044 			    "WMI vdev %i failed to send probe response template command\n",
3045 			    vdev_id);
3046 		dev_kfree_skb(skb);
3047 	}
3048 	return ret;
3049 }
3050 
3051 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
3052 			      bool unsol_bcast_probe_resp_enabled)
3053 {
3054 	struct sk_buff *skb;
3055 	int ret, len;
3056 	struct wmi_fils_discovery_cmd *cmd;
3057 
3058 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3059 		   "WMI vdev %i set %s interval to %u TU\n",
3060 		   vdev_id, unsol_bcast_probe_resp_enabled ?
3061 		   "unsolicited broadcast probe response" : "FILS discovery",
3062 		   interval);
3063 
3064 	len = sizeof(*cmd);
3065 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
3066 	if (!skb)
3067 		return -ENOMEM;
3068 
3069 	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
3070 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
3071 						 len);
3072 	cmd->vdev_id = cpu_to_le32(vdev_id);
3073 	cmd->interval = cpu_to_le32(interval);
3074 	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
3075 
3076 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
3077 	if (ret) {
3078 		ath12k_warn(ar->ab,
3079 			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
3080 			    vdev_id);
3081 		dev_kfree_skb(skb);
3082 	}
3083 	return ret;
3084 }
3085 
3086 static void
3087 ath12k_fill_band_to_mac_param(struct ath12k_base  *soc,
3088 			      struct ath12k_wmi_pdev_band_arg *arg)
3089 {
3090 	u8 i;
3091 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
3092 	struct ath12k_pdev *pdev;
3093 
3094 	for (i = 0; i < soc->num_radios; i++) {
3095 		pdev = &soc->pdevs[i];
3096 		hal_reg_cap = &soc->hal_reg_cap[i];
3097 		arg[i].pdev_id = pdev->pdev_id;
3098 
3099 		switch (pdev->cap.supported_bands) {
3100 		case WMI_HOST_WLAN_2G_5G_CAP:
3101 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3102 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3103 			break;
3104 		case WMI_HOST_WLAN_2G_CAP:
3105 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
3106 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
3107 			break;
3108 		case WMI_HOST_WLAN_5G_CAP:
3109 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
3110 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
3111 			break;
3112 		default:
3113 			break;
3114 		}
3115 	}
3116 }
3117 
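/* Copy the host resource configuration into the little-endian WMI
 * resource config structure carried in WMI_INIT_CMDID.
 */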
3118 static void
3119 ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
3120 				struct ath12k_wmi_resource_config_arg *tg_cfg)
3121 {
3122 	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
3123 	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
3124 	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
3125 	wmi_cfg->num_offload_reorder_buffs =
3126 		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
3127 	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
3128 	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
3129 	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
3130 	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
3131 	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
3132 	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
3133 	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
3134 	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
3135 	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
3136 	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
3137 	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
3138 	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
3139 	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
3140 	wmi_cfg->roam_offload_max_ap_profiles =
3141 		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
3142 	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
3143 	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
3144 	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
3145 	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
3146 	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
3147 	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
3148 	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
3149 	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
3150 		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
3151 	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
3152 	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
3153 	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
3154 	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
3155 	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
3156 	wmi_cfg->num_tdls_conn_table_entries =
3157 		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
3158 	wmi_cfg->beacon_tx_offload_max_vdev =
3159 		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
3160 	wmi_cfg->num_multicast_filter_entries =
3161 		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
3162 	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
3163 	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
3164 	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
3165 	wmi_cfg->max_tdls_concurrent_sleep_sta =
3166 		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
3167 	wmi_cfg->max_tdls_concurrent_buffer_sta =
3168 		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
3169 	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
3170 	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
3171 	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
3172 	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
3173 	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
3174 	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
3175 	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
3176 	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
3177 	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
3178 	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
3179 	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
3180 	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
3181 	wmi_cfg->host_service_flags =
3182 		cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
3183 }
3184 
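/* Build and send WMI_INIT_CMDID. The message carries the resource config
 * TLV, an array-of-struct TLV describing the host memory chunks and, when
 * a hardware mode is selected, a pdev set-hw-mode section followed by the
 * band-to-mac mapping array.
 */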
3185 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
3186 				struct ath12k_wmi_init_cmd_arg *arg)
3187 {
3188 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3189 	struct sk_buff *skb;
3190 	struct wmi_init_cmd *cmd;
3191 	struct ath12k_wmi_resource_config_params *cfg;
3192 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
3193 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
3194 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
3195 	struct wmi_tlv *tlv;
3196 	size_t ret, len;
3197 	void *ptr;
3198 	u32 hw_mode_len = 0;
3199 	u16 idx;
3200 
3201 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
3202 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
3203 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
3204 
3205 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
3206 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
3207 
3208 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3209 	if (!skb)
3210 		return -ENOMEM;
3211 
3212 	cmd = (struct wmi_init_cmd *)skb->data;
3213 
3214 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
3215 						 sizeof(*cmd));
3216 
3217 	ptr = skb->data + sizeof(*cmd);
3218 	cfg = ptr;
3219 
3220 	ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
3221 
3222 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
3223 						 sizeof(*cfg));
3224 
3225 	ptr += sizeof(*cfg);
3226 	host_mem_chunks = ptr + TLV_HDR_SIZE;
3227 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
3228 
3229 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
3230 		host_mem_chunks[idx].tlv_header =
3231 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
3232 					   len);
3233 
3234 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
3235 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
3236 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
3237 
3238 		ath12k_dbg(ab, ATH12K_DBG_WMI,
3239 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
3240 			   arg->mem_chunks[idx].req_id,
3241 			   (u64)arg->mem_chunks[idx].paddr,
3242 			   arg->mem_chunks[idx].len);
3243 	}
3244 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
3245 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
3246 
3247 	/* host memory chunk array TLV header; len is zero when num_mem_chunks is zero */
3248 	tlv = ptr;
3249 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3250 	ptr += TLV_HDR_SIZE + len;
3251 
3252 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
3253 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
3254 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3255 							     sizeof(*hw_mode));
3256 
3257 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
3258 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
3259 
3260 		ptr += sizeof(*hw_mode);
3261 
3262 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
3263 		tlv = ptr;
3264 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
3265 
3266 		ptr += TLV_HDR_SIZE;
3267 		len = sizeof(*band_to_mac);
3268 
3269 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
3270 			band_to_mac = (void *)ptr;
3271 
3272 			band_to_mac->tlv_header =
3273 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
3274 						       len);
3275 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
3276 			band_to_mac->start_freq =
3277 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
3278 			band_to_mac->end_freq =
3279 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
3280 			ptr += sizeof(*band_to_mac);
3281 		}
3282 	}
3283 
3284 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
3285 	if (ret) {
3286 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
3287 		dev_kfree_skb(skb);
3288 	}
3289 
3290 	return ret;
3291 }
3292 
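/* Configure LRO for the given pdev; the th_4/th_6 hash keys are seeded
 * with random bytes before WMI_LRO_CONFIG_CMDID is sent.
 */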
3293 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
3294 			    int pdev_id)
3295 {
3296 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
3297 	struct sk_buff *skb;
3298 	int ret;
3299 
3300 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3301 	if (!skb)
3302 		return -ENOMEM;
3303 
3304 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
3305 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
3306 						 sizeof(*cmd));
3307 
3308 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
3309 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
3310 
3311 	cmd->pdev_id = cpu_to_le32(pdev_id);
3312 
3313 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3314 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
3315 
3316 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
3317 	if (ret) {
3318 		ath12k_warn(ar->ab,
3319 			    "failed to send lro cfg req wmi cmd\n");
3320 		goto err;
3321 	}
3322 
3323 	return 0;
3324 err:
3325 	dev_kfree_skb(skb);
3326 	return ret;
3327 }
3328 
3329 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
3330 {
3331 	unsigned long time_left;
3332 
3333 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
3334 						WMI_SERVICE_READY_TIMEOUT_HZ);
3335 	if (!time_left)
3336 		return -ETIMEDOUT;
3337 
3338 	return 0;
3339 }
3340 
3341 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
3342 {
3343 	unsigned long time_left;
3344 
3345 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
3346 						WMI_SERVICE_READY_TIMEOUT_HZ);
3347 	if (!time_left)
3348 		return -ETIMEDOUT;
3349 
3350 	return 0;
3351 }
3352 
3353 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
3354 			   enum wmi_host_hw_mode_config_type mode)
3355 {
3356 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
3357 	struct sk_buff *skb;
3358 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3359 	int len;
3360 	int ret;
3361 
3362 	len = sizeof(*cmd);
3363 
3364 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3365 	if (!skb)
3366 		return -ENOMEM;
3367 
3368 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
3369 
3370 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
3371 						 sizeof(*cmd));
3372 
3373 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3374 	cmd->hw_mode_index = cpu_to_le32(mode);
3375 
3376 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
3377 	if (ret) {
3378 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
3379 		dev_kfree_skb(skb);
3380 	}
3381 
3382 	return ret;
3383 }
3384 
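/* Fill the init command arguments from hw_params and the host memory
 * chunks tracked in wmi_ab, then send WMI_INIT_CMDID through the first
 * WMI pdev.
 */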
3385 int ath12k_wmi_cmd_init(struct ath12k_base *ab)
3386 {
3387 	struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab;
3388 	struct ath12k_wmi_init_cmd_arg arg = {};
3389 
3390 	ab->hw_params->wmi_init(ab, &arg.res_cfg);
3391 
3392 	arg.num_mem_chunks = wmi_sc->num_mem_chunks;
3393 	arg.hw_mode_id = wmi_sc->preferred_hw_mode;
3394 	arg.mem_chunks = wmi_sc->mem_chunks;
3395 
3396 	if (ab->hw_params->single_pdev_only)
3397 		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
3398 
3399 	arg.num_band_to_mac = ab->num_radios;
3400 	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
3401 
3402 	return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg);
3403 }
3404 
3405 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
3406 				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
3407 {
3408 	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
3409 	struct sk_buff *skb;
3410 	int ret;
3411 
3412 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3413 	if (!skb)
3414 		return -ENOMEM;
3415 
3416 	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
3417 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
3418 						 sizeof(*cmd));
3419 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3420 	cmd->scan_count = cpu_to_le32(arg->scan_count);
3421 	cmd->scan_period = cpu_to_le32(arg->scan_period);
3422 	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
3423 	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
3424 	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
3425 	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
3426 	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
3427 	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
3428 	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
3429 	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
3430 	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
3431 	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
3432 	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
3433 	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
3434 	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
3435 	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
3436 	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
3437 	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
3438 
3439 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3440 		   "WMI spectral scan config cmd vdev_id 0x%x\n",
3441 		   arg->vdev_id);
3442 
3443 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3444 				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
3445 	if (ret) {
3446 		ath12k_warn(ar->ab,
3447 			    "failed to send spectral scan config wmi cmd\n");
3448 		goto err;
3449 	}
3450 
3451 	return 0;
3452 err:
3453 	dev_kfree_skb(skb);
3454 	return ret;
3455 }
3456 
3457 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
3458 				    u32 trigger, u32 enable)
3459 {
3460 	struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
3461 	struct sk_buff *skb;
3462 	int ret;
3463 
3464 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3465 	if (!skb)
3466 		return -ENOMEM;
3467 
3468 	cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
3469 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
3470 						 sizeof(*cmd));
3471 
3472 	cmd->vdev_id = cpu_to_le32(vdev_id);
3473 	cmd->trigger_cmd = cpu_to_le32(trigger);
3474 	cmd->enable_cmd = cpu_to_le32(enable);
3475 
3476 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3477 		   "WMI spectral enable cmd vdev id 0x%x\n",
3478 		   vdev_id);
3479 
3480 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3481 				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
3482 	if (ret) {
3483 		ath12k_warn(ar->ab,
3484 			    "failed to send spectral enable wmi cmd\n");
3485 		goto err;
3486 	}
3487 
3488 	return 0;
3489 err:
3490 	dev_kfree_skb(skb);
3491 	return ret;
3492 }
3493 
3494 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
3495 				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
3496 {
3497 	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
3498 	struct sk_buff *skb;
3499 	int ret;
3500 
3501 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
3502 	if (!skb)
3503 		return -ENOMEM;
3504 
3505 	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
3506 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
3507 						 sizeof(*cmd));
3508 
3509 	cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
3510 	cmd->module_id = cpu_to_le32(arg->module_id);
3511 	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
3512 	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
3513 	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
3514 	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
3515 	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
3516 	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
3517 	cmd->num_elems = cpu_to_le32(arg->num_elems);
3518 	cmd->buf_size = cpu_to_le32(arg->buf_size);
3519 	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
3520 	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
3521 
3522 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3523 		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
3524 		   arg->pdev_id);
3525 
3526 	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
3527 				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
3528 	if (ret) {
3529 		ath12k_warn(ar->ab,
3530 			    "failed to send dma ring cfg req wmi cmd\n");
3531 		goto err;
3532 	}
3533 
3534 	return 0;
3535 err:
3536 	dev_kfree_skb(skb);
3537 	return ret;
3538 }
3539 
3540 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
3541 					  u16 tag, u16 len,
3542 					  const void *ptr, void *data)
3543 {
3544 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
3545 
3546 	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
3547 		return -EPROTO;
3548 
3549 	if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
3550 		return -ENOBUFS;
3551 
3552 	arg->num_buf_entry++;
3553 	return 0;
3554 }
3555 
3556 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
3557 					 u16 tag, u16 len,
3558 					 const void *ptr, void *data)
3559 {
3560 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
3561 
3562 	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
3563 		return -EPROTO;
3564 
3565 	if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
3566 		return -ENOBUFS;
3567 
3568 	arg->num_meta++;
3569 
3570 	return 0;
3571 }
3572 
3573 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
3574 				    u16 tag, u16 len,
3575 				    const void *ptr, void *data)
3576 {
3577 	struct ath12k_wmi_dma_buf_release_arg *arg = data;
3578 	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
3579 	u32 pdev_id;
3580 	int ret;
3581 
3582 	switch (tag) {
3583 	case WMI_TAG_DMA_BUF_RELEASE:
3584 		fixed = ptr;
3585 		arg->fixed = *fixed;
3586 		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
3587 		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
3588 		break;
3589 	case WMI_TAG_ARRAY_STRUCT:
3590 		if (!arg->buf_entry_done) {
3591 			arg->num_buf_entry = 0;
3592 			arg->buf_entry = ptr;
3593 
3594 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3595 						  ath12k_wmi_dma_buf_entry_parse,
3596 						  arg);
3597 			if (ret) {
3598 				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
3599 					    ret);
3600 				return ret;
3601 			}
3602 
3603 			arg->buf_entry_done = true;
3604 		} else if (!arg->meta_data_done) {
3605 			arg->num_meta = 0;
3606 			arg->meta_data = ptr;
3607 
3608 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3609 						  ath12k_wmi_dma_buf_meta_parse,
3610 						  arg);
3611 			if (ret) {
3612 				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
3613 					    ret);
3614 				return ret;
3615 			}
3616 
3617 			arg->meta_data_done = true;
3618 		}
3619 		break;
3620 	default:
3621 		break;
3622 	}
3623 	return 0;
3624 }
3625 
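/* Handle the DMA ring buffer release event: parse the fixed params,
 * the buffer entry array and the spectral meta data array, then hand
 * the buffers back to the direct buffer ring (dbring) layer.
 */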
3626 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
3627 						       struct sk_buff *skb)
3628 {
3629 	struct ath12k_wmi_dma_buf_release_arg arg = {};
3630 	struct ath12k_dbring_buf_release_event param;
3631 	int ret;
3632 
3633 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
3634 				  ath12k_wmi_dma_buf_parse,
3635 				  &arg);
3636 	if (ret) {
3637 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
3638 		return;
3639 	}
3640 
3641 	param.fixed = arg.fixed;
3642 	param.buf_entry = arg.buf_entry;
3643 	param.num_buf_entry = arg.num_buf_entry;
3644 	param.meta_data = arg.meta_data;
3645 	param.num_meta = arg.num_meta;
3646 
3647 	ret = ath12k_dbring_buffer_release_event(ab, &param);
3648 	if (ret) {
3649 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
3650 		return;
3651 	}
3652 }
3653 
3654 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
3655 					 u16 tag, u16 len,
3656 					 const void *ptr, void *data)
3657 {
3658 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3659 	struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
3660 	u32 phy_map = 0;
3661 
3662 	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
3663 		return -EPROTO;
3664 
3665 	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
3666 		return -ENOBUFS;
3667 
3668 	hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
3669 				   hw_mode_id);
3670 	svc_rdy_ext->n_hw_mode_caps++;
3671 
3672 	phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
3673 	svc_rdy_ext->tot_phy_id += fls(phy_map);
3674 
3675 	return 0;
3676 }
3677 
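/* Walk the hw mode capability array and select the mode with the
 * lowest ath12k_hw_mode_pri_map value (highest priority) as the
 * preferred hw mode.
 */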
3678 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
3679 				   u16 len, const void *ptr, void *data)
3680 {
3681 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3682 	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
3683 	enum wmi_host_hw_mode_config_type mode, pref;
3684 	u32 i;
3685 	int ret;
3686 
3687 	svc_rdy_ext->n_hw_mode_caps = 0;
3688 	svc_rdy_ext->hw_mode_caps = ptr;
3689 
3690 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
3691 				  ath12k_wmi_hw_mode_caps_parse,
3692 				  svc_rdy_ext);
3693 	if (ret) {
3694 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
3695 		return ret;
3696 	}
3697 
3698 	for (i = 0; i < svc_rdy_ext->n_hw_mode_caps; i++) {
3699 		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
3700 		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
3701 		pref = soc->wmi_ab.preferred_hw_mode;
3702 
3703 		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
3704 			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
3705 			soc->wmi_ab.preferred_hw_mode = mode;
3706 		}
3707 	}
3708 
3709 	ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
3710 		   soc->wmi_ab.preferred_hw_mode);
3711 	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
3712 		return -EINVAL;
3713 
3714 	return 0;
3715 }
3716 
3717 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
3718 					 u16 tag, u16 len,
3719 					 const void *ptr, void *data)
3720 {
3721 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3722 
3723 	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
3724 		return -EPROTO;
3725 
3726 	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
3727 		return -ENOBUFS;
3728 
3729 	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
3730 	if (!svc_rdy_ext->n_mac_phy_caps) {
3731 		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
3732 						    GFP_ATOMIC);
3733 		if (!svc_rdy_ext->mac_phy_caps)
3734 			return -ENOMEM;
3735 	}
3736 
3737 	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
3738 	svc_rdy_ext->n_mac_phy_caps++;
3739 	return 0;
3740 }
3741 
3742 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
3743 					     u16 tag, u16 len,
3744 					     const void *ptr, void *data)
3745 {
3746 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3747 
3748 	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
3749 		return -EPROTO;
3750 
3751 	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
3752 		return -ENOBUFS;
3753 
3754 	svc_rdy_ext->n_ext_hal_reg_caps++;
3755 	return 0;
3756 }
3757 
3758 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
3759 				       u16 len, const void *ptr, void *data)
3760 {
3761 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
3762 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3763 	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
3764 	int ret;
3765 	u32 i;
3766 
3767 	svc_rdy_ext->n_ext_hal_reg_caps = 0;
3768 	svc_rdy_ext->ext_hal_reg_caps = ptr;
3769 	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
3770 				  ath12k_wmi_ext_hal_reg_caps_parse,
3771 				  svc_rdy_ext);
3772 	if (ret) {
3773 		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
3774 		return ret;
3775 	}
3776 
3777 	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
3778 		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
3779 						      svc_rdy_ext->soc_hal_reg_caps,
3780 						      svc_rdy_ext->ext_hal_reg_caps, i,
3781 						      &reg_cap);
3782 		if (ret) {
3783 			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
3784 			return ret;
3785 		}
3786 		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
3787 	}
3788 	return 0;
3789 }
3790 
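/* Parse WMI_TAG_SOC_HAL_REG_CAPABILITIES: walk the preferred hw mode's
 * phy_id_map and pull the mac/phy capabilities of each radio into its
 * pdev; single_pdev_only targets collapse everything into pdev 0.
 */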
3791 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
3792 						 u16 len, const void *ptr,
3793 						 void *data)
3794 {
3795 	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
3796 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3797 	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
3798 	u32 phy_id_map;
3799 	int pdev_index = 0;
3800 	int ret;
3801 
3802 	svc_rdy_ext->soc_hal_reg_caps = ptr;
3803 	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
3804 
3805 	soc->num_radios = 0;
3806 	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
3807 
3808 	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
3809 		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
3810 							    svc_rdy_ext,
3811 							    hw_mode_id, soc->num_radios,
3812 							    &soc->pdevs[pdev_index]);
3813 		if (ret) {
3814 			ath12k_warn(soc, "failed to extract mac caps, idx: %d\n",
3815 				    soc->num_radios);
3816 			return ret;
3817 		}
3818 
3819 		soc->num_radios++;
3820 
3821 		/* For single_pdev_only targets,
3822 		 * save mac_phy capability in the same pdev
3823 		 */
3824 		if (soc->hw_params->single_pdev_only)
3825 			pdev_index = 0;
3826 		else
3827 			pdev_index = soc->num_radios;
3828 
3829 		/* TODO: mac_phy_cap prints */
3830 		phy_id_map >>= 1;
3831 	}
3832 
3833 	if (soc->hw_params->single_pdev_only) {
3834 		soc->num_radios = 1;
3835 		soc->pdevs[0].pdev_id = 0;
3836 	}
3837 
3838 	return 0;
3839 }
3840 
3841 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
3842 					  u16 tag, u16 len,
3843 					  const void *ptr, void *data)
3844 {
3845 	struct ath12k_wmi_dma_ring_caps_parse *parse = data;
3846 
3847 	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
3848 		return -EPROTO;
3849 
3850 	parse->n_dma_ring_caps++;
3851 	return 0;
3852 }
3853 
3854 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
3855 					u32 num_cap)
3856 {
3857 	size_t sz;
3858 	void *ptr;
3859 
3860 	sz = num_cap * sizeof(struct ath12k_dbring_cap);
3861 	ptr = kzalloc(sz, GFP_ATOMIC);
3862 	if (!ptr)
3863 		return -ENOMEM;
3864 
3865 	ab->db_caps = ptr;
3866 	ab->num_db_cap = num_cap;
3867 
3868 	return 0;
3869 }
3870 
3871 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
3872 {
3873 	kfree(ab->db_caps);
3874 	ab->db_caps = NULL;
3875 }
3876 
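/* Count the DMA ring capability TLVs and copy them into ab->db_caps,
 * validating each module id against WMI_DIRECT_BUF_MAX. Caps that were
 * already processed from an earlier event are left untouched.
 */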
3877 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
3878 				    u16 len, const void *ptr, void *data)
3879 {
3880 	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
3881 	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
3882 	struct ath12k_dbring_cap *dir_buff_caps;
3883 	int ret;
3884 	u32 i;
3885 
3886 	dma_caps_parse->n_dma_ring_caps = 0;
3887 	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
3888 	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3889 				  ath12k_wmi_dma_ring_caps_parse,
3890 				  dma_caps_parse);
3891 	if (ret) {
3892 		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
3893 		return ret;
3894 	}
3895 
3896 	if (!dma_caps_parse->n_dma_ring_caps)
3897 		return 0;
3898 
3899 	if (ab->num_db_cap) {
3900 		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
3901 		return 0;
3902 	}
3903 
3904 	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
3905 	if (ret)
3906 		return ret;
3907 
3908 	dir_buff_caps = ab->db_caps;
3909 	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
3910 		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
3911 			ath12k_warn(ab, "Invalid module id %d\n",
3912 				    le32_to_cpu(dma_caps[i].module_id));
3913 			ret = -EINVAL;
3914 			goto free_dir_buff;
3915 		}
3916 
3917 		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
3918 		dir_buff_caps[i].pdev_id =
3919 			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
3920 		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
3921 		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
3922 		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
3923 	}
3924 
3925 	return 0;
3926 
3927 free_dir_buff:
3928 	ath12k_wmi_free_dbring_caps(ab);
3929 	return ret;
3930 }
3931 
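/* TLV iterator callback for WMI_SERVICE_READY_EXT. The unnamed
 * WMI_TAG_ARRAY_STRUCT TLVs arrive in a fixed order which is tracked
 * with the *_done flags in struct ath12k_wmi_svc_rdy_ext_parse.
 */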
3932 static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
3933 					u16 tag, u16 len,
3934 					const void *ptr, void *data)
3935 {
3936 	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
3937 	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
3938 	int ret;
3939 
3940 	switch (tag) {
3941 	case WMI_TAG_SERVICE_READY_EXT_EVENT:
3942 		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
3943 						&svc_rdy_ext->arg);
3944 		if (ret) {
3945 			ath12k_warn(ab, "unable to extract ext params\n");
3946 			return ret;
3947 		}
3948 		break;
3949 
3950 	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
3951 		svc_rdy_ext->hw_caps = ptr;
3952 		svc_rdy_ext->arg.num_hw_modes =
3953 			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
3954 		break;
3955 
3956 	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
3957 		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
3958 							    svc_rdy_ext);
3959 		if (ret)
3960 			return ret;
3961 		break;
3962 
3963 	case WMI_TAG_ARRAY_STRUCT:
3964 		if (!svc_rdy_ext->hw_mode_done) {
3965 			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
3966 			if (ret)
3967 				return ret;
3968 
3969 			svc_rdy_ext->hw_mode_done = true;
3970 		} else if (!svc_rdy_ext->mac_phy_done) {
3971 			svc_rdy_ext->n_mac_phy_caps = 0;
3972 			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
3973 						  ath12k_wmi_mac_phy_caps_parse,
3974 						  svc_rdy_ext);
3975 			if (ret) {
3976 				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
3977 				return ret;
3978 			}
3979 
3980 			svc_rdy_ext->mac_phy_done = true;
3981 		} else if (!svc_rdy_ext->ext_hal_reg_done) {
3982 			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
3983 			if (ret)
3984 				return ret;
3985 
3986 			svc_rdy_ext->ext_hal_reg_done = true;
3987 		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
3988 			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
3989 		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
3990 			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
3991 		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
3992 			svc_rdy_ext->oem_dma_ring_cap_done = true;
3993 		} else if (!svc_rdy_ext->dma_ring_cap_done) {
3994 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
3995 						       &svc_rdy_ext->dma_caps_parse);
3996 			if (ret)
3997 				return ret;
3998 
3999 			svc_rdy_ext->dma_ring_cap_done = true;
4000 		}
4001 		break;
4002 
4003 	default:
4004 		break;
4005 	}
4006 	return 0;
4007 }
4008 
4009 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
4010 					  struct sk_buff *skb)
4011 {
4012 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
4013 	int ret;
4014 
4015 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4016 				  ath12k_wmi_svc_rdy_ext_parse,
4017 				  &svc_rdy_ext);
4018 	if (ret) {
4019 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
4020 		goto err;
4021 	}
4022 
4023 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
4024 		complete(&ab->wmi_ab.service_ready);
4025 
4026 	kfree(svc_rdy_ext.mac_phy_caps);
4027 	return 0;
4028 
4029 err:
4030 	ath12k_wmi_free_dbring_caps(ab);
4031 	return ret;
4032 }
4033 
4034 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
4035 					 u16 tag, u16 len,
4036 					 const void *ptr, void *data)
4037 {
4038 	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
4039 	int ret;
4040 
4041 	switch (tag) {
4042 	case WMI_TAG_ARRAY_STRUCT:
4043 		if (!parse->dma_ring_cap_done) {
4044 			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
4045 						       &parse->dma_caps_parse);
4046 			if (ret)
4047 				return ret;
4048 
4049 			parse->dma_ring_cap_done = true;
4050 		}
4051 		break;
4052 	default:
4053 		break;
4054 	}
4055 
4056 	return 0;
4057 }
4058 
4059 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
4060 					   struct sk_buff *skb)
4061 {
4062 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
4063 	int ret;
4064 
4065 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4066 				  ath12k_wmi_svc_rdy_ext2_parse,
4067 				  &svc_rdy_ext2);
4068 	if (ret) {
4069 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
4070 		goto err;
4071 	}
4072 
4073 	complete(&ab->wmi_ab.service_ready);
4074 
4075 	return 0;
4076 
4077 err:
4078 	ath12k_wmi_free_dbring_caps(ab);
4079 	return ret;
4080 }
4081 
4082 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4083 					   struct wmi_vdev_start_resp_event *vdev_rsp)
4084 {
4085 	const void **tb;
4086 	const struct wmi_vdev_start_resp_event *ev;
4087 	int ret;
4088 
4089 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4090 	if (IS_ERR(tb)) {
4091 		ret = PTR_ERR(tb);
4092 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4093 		return ret;
4094 	}
4095 
4096 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
4097 	if (!ev) {
4098 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
4099 		kfree(tb);
4100 		return -EPROTO;
4101 	}
4102 
4103 	*vdev_rsp = *ev;
4104 
4105 	kfree(tb);
4106 	return 0;
4107 }
4108 
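/* Convert an array of WMI extended reg rules into ath12k_reg_rule
 * entries; the caller owns the returned allocation.
 */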
4109 static struct ath12k_reg_rule
4110 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
4111 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
4112 {
4113 	struct ath12k_reg_rule *reg_rule_ptr;
4114 	u32 count;
4115 
4116 	reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
4117 			       GFP_ATOMIC);
4118 
4119 	if (!reg_rule_ptr)
4120 		return NULL;
4121 
4122 	for (count = 0; count < num_reg_rules; count++) {
4123 		reg_rule_ptr[count].start_freq =
4124 			le32_get_bits(wmi_reg_rule[count].freq_info,
4125 				      REG_RULE_START_FREQ);
4126 		reg_rule_ptr[count].end_freq =
4127 			le32_get_bits(wmi_reg_rule[count].freq_info,
4128 				      REG_RULE_END_FREQ);
4129 		reg_rule_ptr[count].max_bw =
4130 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4131 				      REG_RULE_MAX_BW);
4132 		reg_rule_ptr[count].reg_power =
4133 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4134 				      REG_RULE_REG_PWR);
4135 		reg_rule_ptr[count].ant_gain =
4136 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
4137 				      REG_RULE_ANT_GAIN);
4138 		reg_rule_ptr[count].flags =
4139 			le32_get_bits(wmi_reg_rule[count].flag_info,
4140 				      REG_RULE_FLAGS);
4141 		reg_rule_ptr[count].psd_flag =
4142 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4143 				      REG_RULE_PSD_INFO);
4144 		reg_rule_ptr[count].psd_eirp =
4145 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
4146 				      REG_RULE_PSD_EIRP);
4147 	}
4148 
4149 	return reg_rule_ptr;
4150 }
4151 
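/* Parse the WMI_REG_CHAN_LIST_CC_EXT event: validate the per-band rule
 * counts, then build the 2 GHz, 5 GHz and 6 GHz (AP and client) rule
 * lists in reg_info.
 */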
4152 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
4153 						   struct sk_buff *skb,
4154 						   struct ath12k_reg_info *reg_info)
4155 {
4156 	const void **tb;
4157 	const struct wmi_reg_chan_list_cc_ext_event *ev;
4158 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
4159 	u32 num_2g_reg_rules, num_5g_reg_rules;
4160 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
4161 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
4162 	u32 total_reg_rules = 0;
4163 	int ret, i, j;
4164 
4165 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
4166 
4167 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4168 	if (IS_ERR(tb)) {
4169 		ret = PTR_ERR(tb);
4170 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4171 		return ret;
4172 	}
4173 
4174 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
4175 	if (!ev) {
4176 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
4177 		kfree(tb);
4178 		return -EPROTO;
4179 	}
4180 
4181 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
4182 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
4183 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
4184 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
4185 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
4186 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
4187 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
4188 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
4189 
4190 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4191 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4192 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
4193 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4194 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
4195 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4196 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
4197 	}
4198 
4199 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
4200 	total_reg_rules += num_2g_reg_rules;
4201 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
4202 	total_reg_rules += num_5g_reg_rules;
4203 
4204 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
4205 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
4206 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
4207 		kfree(tb);
4208 		return -EINVAL;
4209 	}
4210 
4211 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4212 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
4213 
4214 		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
4215 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
4216 				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
4217 			kfree(tb);
4218 			return -EINVAL;
4219 		}
4220 
4221 		total_reg_rules += num_6g_reg_rules_ap[i];
4222 	}
4223 
4224 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4225 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
4226 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4227 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
4228 
4229 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
4230 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4231 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
4232 
4233 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
4234 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4235 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
4236 
4237 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
4238 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
4239 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] >  MAX_6G_REG_RULES) {
4240 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit for client (type: %d)\n",
4241 				    i);
4242 			kfree(tb);
4243 			return -EINVAL;
4244 		}
4245 	}
4246 
4247 	if (!total_reg_rules) {
4248 		ath12k_warn(ab, "No reg rules available\n");
4249 		kfree(tb);
4250 		return -EINVAL;
4251 	}
4252 
4253 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
4254 
4255 	/* FIXME: Currently FW includes 6G reg rule also in 5G rule
4256 	 * list for country US.
4257 	 * Having same 6G reg rule in 5G and 6G rules list causes
4258 	 * intersect check to be true, and same rules will be shown
4259 	 * multiple times in iw cmd. So added hack below to avoid
4260 	 * parsing 6G rule from 5G reg rule list, and this can be
4261 	 * removed later, after FW updates to remove 6G reg rule
4262 	 * from 5G rules list.
4263 	 */
4264 	if (memcmp(reg_info->alpha2, "US", 2) == 0) {
4265 		reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
4266 		num_5g_reg_rules = reg_info->num_5g_reg_rules;
4267 	}
4268 
4269 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
4270 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
4271 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
4272 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
4273 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
4274 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
4275 
4276 	switch (le32_to_cpu(ev->status_code)) {
4277 	case WMI_REG_SET_CC_STATUS_PASS:
4278 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
4279 		break;
4280 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
4281 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
4282 		break;
4283 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
4284 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
4285 		break;
4286 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
4287 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
4288 		break;
4289 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
4290 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
4291 		break;
4292 	case WMI_REG_SET_CC_STATUS_FAIL:
4293 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
4294 		break;
4295 	}
4296 
4297 	reg_info->is_ext_reg_event = true;
4298 
4299 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
4300 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
4301 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
4302 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
4303 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
4304 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
4305 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
4306 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
4307 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
4308 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
4309 
4310 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4311 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4312 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
4313 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
4314 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
4315 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4316 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
4317 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
4318 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
4319 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
4320 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
4321 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
4322 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
4323 	}
4324 
4325 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4326 		   "%s: cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
4327 		   __func__, reg_info->alpha2, reg_info->dfs_region,
4328 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
4329 		   reg_info->min_bw_5g, reg_info->max_bw_5g);
4330 
4331 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4332 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
4333 		   num_2g_reg_rules, num_5g_reg_rules);
4334 
4335 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4336 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
4337 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
4338 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
4339 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
4340 
4341 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4342 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4343 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
4344 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
4345 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
4346 
4347 	ath12k_dbg(ab, ATH12K_DBG_WMI,
4348 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
4349 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
4350 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
4351 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
4352 
4353 	ext_wmi_reg_rule =
4354 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
4355 			+ sizeof(*ev)
4356 			+ sizeof(struct wmi_tlv));
4357 
4358 	if (num_2g_reg_rules) {
4359 		reg_info->reg_rules_2g_ptr =
4360 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
4361 						      ext_wmi_reg_rule);
4362 
4363 		if (!reg_info->reg_rules_2g_ptr) {
4364 			kfree(tb);
4365 			ath12k_warn(ab, "Unable to allocate memory for 2g rules\n");
4366 			return -ENOMEM;
4367 		}
4368 	}
4369 
4370 	if (num_5g_reg_rules) {
4371 		ext_wmi_reg_rule += num_2g_reg_rules;
4372 		reg_info->reg_rules_5g_ptr =
4373 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
4374 						      ext_wmi_reg_rule);
4375 
4376 		if (!reg_info->reg_rules_5g_ptr) {
4377 			kfree(tb);
4378 			ath12k_warn(ab, "Unable to allocate memory for 5g rules\n");
4379 			return -ENOMEM;
4380 		}
4381 	}
4382 
4383 	ext_wmi_reg_rule += num_5g_reg_rules;
4384 
4385 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
4386 		reg_info->reg_rules_6g_ap_ptr[i] =
4387 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
4388 						      ext_wmi_reg_rule);
4389 
4390 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
4391 			kfree(tb);
4392 			ath12k_warn(ab, "Unable to allocate memory for 6g ap rules\n");
4393 			return -ENOMEM;
4394 		}
4395 
4396 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
4397 	}
4398 
4399 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
4400 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4401 			reg_info->reg_rules_6g_client_ptr[j][i] =
4402 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
4403 							      ext_wmi_reg_rule);
4404 
4405 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
4406 				kfree(tb);
4407 				ath12k_warn(ab, "Unable to allocate memory for 6g client rules\n");
4408 				return -ENOMEM;
4409 			}
4410 
4411 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
4412 		}
4413 	}
4414 
4415 	reg_info->client_type = le32_to_cpu(ev->client_type);
4416 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
4417 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
4418 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
4419 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
4420 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
4421 		le32_to_cpu(ev->domain_code_6g_ap_sp);
4422 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
4423 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
4424 
4425 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
4426 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
4427 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
4428 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
4429 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
4430 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
4431 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
4432 	}
4433 
4434 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
4435 
4436 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
4437 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
4438 
4439 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
4440 
4441 	kfree(tb);
4442 	return 0;
4443 }
4444 
4445 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
4446 					struct wmi_peer_delete_resp_event *peer_del_resp)
4447 {
4448 	const void **tb;
4449 	const struct wmi_peer_delete_resp_event *ev;
4450 	int ret;
4451 
4452 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4453 	if (IS_ERR(tb)) {
4454 		ret = PTR_ERR(tb);
4455 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4456 		return ret;
4457 	}
4458 
4459 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
4460 	if (!ev) {
4461 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
4462 		kfree(tb);
4463 		return -EPROTO;
4464 	}
4465 
4466 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
4467 
4468 	peer_del_resp->vdev_id = ev->vdev_id;
4469 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
4470 			ev->peer_macaddr.addr);
4471 
4472 	kfree(tb);
4473 	return 0;
4474 }
4475 
4476 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
4477 					struct sk_buff *skb,
4478 					u32 *vdev_id)
4479 {
4480 	const void **tb;
4481 	const struct wmi_vdev_delete_resp_event *ev;
4482 	int ret;
4483 
4484 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4485 	if (IS_ERR(tb)) {
4486 		ret = PTR_ERR(tb);
4487 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4488 		return ret;
4489 	}
4490 
4491 	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
4492 	if (!ev) {
4493 		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
4494 		kfree(tb);
4495 		return -EPROTO;
4496 	}
4497 
4498 	*vdev_id = le32_to_cpu(ev->vdev_id);
4499 
4500 	kfree(tb);
4501 	return 0;
4502 }
4503 
4504 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
4505 					u32 len, u32 *vdev_id,
4506 					u32 *tx_status)
4507 {
4508 	const void **tb;
4509 	const struct wmi_bcn_tx_status_event *ev;
4510 	int ret;
4511 
4512 	tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
4513 	if (IS_ERR(tb)) {
4514 		ret = PTR_ERR(tb);
4515 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4516 		return ret;
4517 	}
4518 
4519 	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
4520 	if (!ev) {
4521 		ath12k_warn(ab, "failed to fetch bcn tx status ev");
4522 		kfree(tb);
4523 		return -EPROTO;
4524 	}
4525 
4526 	*vdev_id = le32_to_cpu(ev->vdev_id);
4527 	*tx_status = le32_to_cpu(ev->tx_status);
4528 
4529 	kfree(tb);
4530 	return 0;
4531 }
4532 
4533 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
4534 					      u32 *vdev_id)
4535 {
4536 	const void **tb;
4537 	const struct wmi_vdev_stopped_event *ev;
4538 	int ret;
4539 
4540 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4541 	if (IS_ERR(tb)) {
4542 		ret = PTR_ERR(tb);
4543 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4544 		return ret;
4545 	}
4546 
4547 	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
4548 	if (!ev) {
4549 		ath12k_warn(ab, "failed to fetch vdev stop ev");
4550 		kfree(tb);
4551 		return -EPROTO;
4552 	}
4553 
4554 	*vdev_id = le32_to_cpu(ev->vdev_id);
4555 
4556 	kfree(tb);
4557 	return 0;
4558 }
4559 
4560 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
4561 					u16 tag, u16 len,
4562 					const void *ptr, void *data)
4563 {
4564 	struct wmi_tlv_mgmt_rx_parse *parse = data;
4565 
4566 	switch (tag) {
4567 	case WMI_TAG_MGMT_RX_HDR:
4568 		parse->fixed = ptr;
4569 		break;
4570 	case WMI_TAG_ARRAY_BYTE:
4571 		if (!parse->frame_buf_done) {
4572 			parse->frame_buf = ptr;
4573 			parse->frame_buf_done = true;
4574 		}
4575 		break;
4576 	}
4577 	return 0;
4578 }
4579 
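/* Extract the mgmt rx metadata from the TLVs and reshape the skb so
 * that it covers only the received management frame.
 */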
4580 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
4581 					  struct sk_buff *skb,
4582 					  struct ath12k_wmi_mgmt_rx_arg *hdr)
4583 {
4584 	struct wmi_tlv_mgmt_rx_parse parse = { };
4585 	const struct ath12k_wmi_mgmt_rx_params *ev;
4586 	const u8 *frame;
4587 	int i, ret;
4588 
4589 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4590 				  ath12k_wmi_tlv_mgmt_rx_parse,
4591 				  &parse);
4592 	if (ret) {
4593 		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
4594 		return ret;
4595 	}
4596 
4597 	ev = parse.fixed;
4598 	frame = parse.frame_buf;
4599 
4600 	if (!ev || !frame) {
4601 		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
4602 		return -EPROTO;
4603 	}
4604 
4605 	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
4606 	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
4607 	hdr->channel = le32_to_cpu(ev->channel);
4608 	hdr->snr = le32_to_cpu(ev->snr);
4609 	hdr->rate = le32_to_cpu(ev->rate);
4610 	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
4611 	hdr->buf_len = le32_to_cpu(ev->buf_len);
4612 	hdr->status = le32_to_cpu(ev->status);
4613 	hdr->flags = le32_to_cpu(ev->flags);
4614 	hdr->rssi = a_sle32_to_cpu(ev->rssi);
4615 	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
4616 
4617 	for (i = 0; i < ATH_MAX_ANTENNA; i++)
4618 		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
4619 
4620 	if (skb->len < (frame - skb->data) + hdr->buf_len) {
4621 		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
4622 		return -EPROTO;
4623 	}
4624 
4625 	/* shift the sk_buff to point to `frame` */
4626 	skb_trim(skb, 0);
4627 	skb_put(skb, frame - skb->data);
4628 	skb_pull(skb, frame - skb->data);
4629 	skb_put(skb, hdr->buf_len);
4630 
4631 	return 0;
4632 }
4633 
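/* Complete a management frame tx: look up the msdu by desc_id in the
 * tx mgmt idr, unmap it and report the tx status to mac80211.
 */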
4634 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
4635 				    u32 status)
4636 {
4637 	struct sk_buff *msdu;
4638 	struct ieee80211_tx_info *info;
4639 	struct ath12k_skb_cb *skb_cb;
4640 
4641 	spin_lock_bh(&ar->txmgmt_idr_lock);
4642 	msdu = idr_find(&ar->txmgmt_idr, desc_id);
4643 
4644 	if (!msdu) {
4645 		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
4646 			    desc_id);
4647 		spin_unlock_bh(&ar->txmgmt_idr_lock);
4648 		return -ENOENT;
4649 	}
4650 
4651 	idr_remove(&ar->txmgmt_idr, desc_id);
4652 	spin_unlock_bh(&ar->txmgmt_idr_lock);
4653 
4654 	skb_cb = ATH12K_SKB_CB(msdu);
4655 	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
4656 
4657 	info = IEEE80211_SKB_CB(msdu);
4658 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
4659 		info->flags |= IEEE80211_TX_STAT_ACK;
4660 
4661 	ieee80211_tx_status_irqsafe(ar->hw, msdu);
4662 
4663 	/* WARN if this event arrives without any mgmt tx having been issued */
4664 	if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
4665 		WARN_ON_ONCE(1);
4666 
4667 	return 0;
4668 }
4669 
4670 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
4671 					       struct sk_buff *skb,
4672 					       struct wmi_mgmt_tx_compl_event *param)
4673 {
4674 	const void **tb;
4675 	const struct wmi_mgmt_tx_compl_event *ev;
4676 	int ret;
4677 
4678 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4679 	if (IS_ERR(tb)) {
4680 		ret = PTR_ERR(tb);
4681 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4682 		return ret;
4683 	}
4684 
4685 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
4686 	if (!ev) {
4687 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
4688 		kfree(tb);
4689 		return -EPROTO;
4690 	}
4691 
4692 	param->pdev_id = ev->pdev_id;
4693 	param->desc_id = ev->desc_id;
4694 	param->status = ev->status;
4695 
4696 	kfree(tb);
4697 	return 0;
4698 }
4699 
4700 static void ath12k_wmi_event_scan_started(struct ath12k *ar)
4701 {
4702 	lockdep_assert_held(&ar->data_lock);
4703 
4704 	switch (ar->scan.state) {
4705 	case ATH12K_SCAN_IDLE:
4706 	case ATH12K_SCAN_RUNNING:
4707 	case ATH12K_SCAN_ABORTING:
4708 		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
4709 			    ath12k_scan_state_str(ar->scan.state),
4710 			    ar->scan.state);
4711 		break;
4712 	case ATH12K_SCAN_STARTING:
4713 		ar->scan.state = ATH12K_SCAN_RUNNING;
4714 		complete(&ar->scan.started);
4715 		break;
4716 	}
4717 }
4718 
4719 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
4720 {
4721 	lockdep_assert_held(&ar->data_lock);
4722 
4723 	switch (ar->scan.state) {
4724 	case ATH12K_SCAN_IDLE:
4725 	case ATH12K_SCAN_RUNNING:
4726 	case ATH12K_SCAN_ABORTING:
4727 		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
4728 			    ath12k_scan_state_str(ar->scan.state),
4729 			    ar->scan.state);
4730 		break;
4731 	case ATH12K_SCAN_STARTING:
4732 		complete(&ar->scan.started);
4733 		__ath12k_mac_scan_finish(ar);
4734 		break;
4735 	}
4736 }
4737 
4738 static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
4739 {
4740 	lockdep_assert_held(&ar->data_lock);
4741 
4742 	switch (ar->scan.state) {
4743 	case ATH12K_SCAN_IDLE:
4744 	case ATH12K_SCAN_STARTING:
4745 		/* One suspected reason scan can be completed while starting is
4746 		 * if firmware fails to deliver all scan events to the host,
4747 		 * e.g. when transport pipe is full. This has been observed
4748 		 * with spectral scan phyerr events starving wmi transport
4749 		 * pipe. In such case the "scan completed" event should be (and
4750 		 * is) ignored by the host as it may be just firmware's scan
4751 		 * state machine recovering.
4752 		 */
4753 		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
4754 			    ath12k_scan_state_str(ar->scan.state),
4755 			    ar->scan.state);
4756 		break;
4757 	case ATH12K_SCAN_RUNNING:
4758 	case ATH12K_SCAN_ABORTING:
4759 		__ath12k_mac_scan_finish(ar);
4760 		break;
4761 	}
4762 }
4763 
4764 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
4765 {
4766 	lockdep_assert_held(&ar->data_lock);
4767 
4768 	switch (ar->scan.state) {
4769 	case ATH12K_SCAN_IDLE:
4770 	case ATH12K_SCAN_STARTING:
4771 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
4772 			    ath12k_scan_state_str(ar->scan.state),
4773 			    ar->scan.state);
4774 		break;
4775 	case ATH12K_SCAN_RUNNING:
4776 	case ATH12K_SCAN_ABORTING:
4777 		ar->scan_channel = NULL;
4778 		break;
4779 	}
4780 }
4781 
4782 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
4783 {
4784 	lockdep_assert_held(&ar->data_lock);
4785 
4786 	switch (ar->scan.state) {
4787 	case ATH12K_SCAN_IDLE:
4788 	case ATH12K_SCAN_STARTING:
4789 		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
4790 			    ath12k_scan_state_str(ar->scan.state),
4791 			    ar->scan.state);
4792 		break;
4793 	case ATH12K_SCAN_RUNNING:
4794 	case ATH12K_SCAN_ABORTING:
4795 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
4796 		break;
4797 	}
4798 }
4799 
4800 static const char *
4801 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
4802 			       enum wmi_scan_completion_reason reason)
4803 {
4804 	switch (type) {
4805 	case WMI_SCAN_EVENT_STARTED:
4806 		return "started";
4807 	case WMI_SCAN_EVENT_COMPLETED:
4808 		switch (reason) {
4809 		case WMI_SCAN_REASON_COMPLETED:
4810 			return "completed";
4811 		case WMI_SCAN_REASON_CANCELLED:
4812 			return "completed [cancelled]";
4813 		case WMI_SCAN_REASON_PREEMPTED:
4814 			return "completed [preempted]";
4815 		case WMI_SCAN_REASON_TIMEDOUT:
4816 			return "completed [timedout]";
4817 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
4818 			return "completed [internal err]";
4819 		case WMI_SCAN_REASON_MAX:
4820 			break;
4821 		}
4822 		return "completed [unknown]";
4823 	case WMI_SCAN_EVENT_BSS_CHANNEL:
4824 		return "bss channel";
4825 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
4826 		return "foreign channel";
4827 	case WMI_SCAN_EVENT_DEQUEUED:
4828 		return "dequeued";
4829 	case WMI_SCAN_EVENT_PREEMPTED:
4830 		return "preempted";
4831 	case WMI_SCAN_EVENT_START_FAILED:
4832 		return "start failed";
4833 	case WMI_SCAN_EVENT_RESTARTED:
4834 		return "restarted";
4835 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
4836 		return "foreign channel exit";
4837 	default:
4838 		return "unknown";
4839 	}
4840 }
4841 
4842 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
4843 			       struct wmi_scan_event *scan_evt_param)
4844 {
4845 	const void **tb;
4846 	const struct wmi_scan_event *ev;
4847 	int ret;
4848 
4849 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4850 	if (IS_ERR(tb)) {
4851 		ret = PTR_ERR(tb);
4852 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4853 		return ret;
4854 	}
4855 
4856 	ev = tb[WMI_TAG_SCAN_EVENT];
4857 	if (!ev) {
4858 		ath12k_warn(ab, "failed to fetch scan ev");
4859 		kfree(tb);
4860 		return -EPROTO;
4861 	}
4862 
4863 	scan_evt_param->event_type = ev->event_type;
4864 	scan_evt_param->reason = ev->reason;
4865 	scan_evt_param->channel_freq = ev->channel_freq;
4866 	scan_evt_param->scan_req_id = ev->scan_req_id;
4867 	scan_evt_param->scan_id = ev->scan_id;
4868 	scan_evt_param->vdev_id = ev->vdev_id;
4869 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
4870 
4871 	kfree(tb);
4872 	return 0;
4873 }
4874 
4875 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
4876 					   struct wmi_peer_sta_kickout_arg *arg)
4877 {
4878 	const void **tb;
4879 	const struct wmi_peer_sta_kickout_event *ev;
4880 	int ret;
4881 
4882 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4883 	if (IS_ERR(tb)) {
4884 		ret = PTR_ERR(tb);
4885 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4886 		return ret;
4887 	}
4888 
4889 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
4890 	if (!ev) {
4891 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
4892 		kfree(tb);
4893 		return -EPROTO;
4894 	}
4895 
4896 	arg->mac_addr = ev->peer_macaddr.addr;
4897 
4898 	kfree(tb);
4899 	return 0;
4900 }
4901 
4902 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
4903 			       struct wmi_roam_event *roam_ev)
4904 {
4905 	const void **tb;
4906 	const struct wmi_roam_event *ev;
4907 	int ret;
4908 
4909 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4910 	if (IS_ERR(tb)) {
4911 		ret = PTR_ERR(tb);
4912 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4913 		return ret;
4914 	}
4915 
4916 	ev = tb[WMI_TAG_ROAM_EVENT];
4917 	if (!ev) {
4918 		ath12k_warn(ab, "failed to fetch roam ev");
4919 		kfree(tb);
4920 		return -EPROTO;
4921 	}
4922 
4923 	roam_ev->vdev_id = ev->vdev_id;
4924 	roam_ev->reason = ev->reason;
4925 	roam_ev->rssi = ev->rssi;
4926 
4927 	kfree(tb);
4928 	return 0;
4929 }
4930 
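/* Map a center frequency to a flat channel index across all registered
 * bands.
 */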
4931 static int freq_to_idx(struct ath12k *ar, int freq)
4932 {
4933 	struct ieee80211_supported_band *sband;
4934 	int band, ch, idx = 0;
4935 
4936 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
4937 		sband = ar->hw->wiphy->bands[band];
4938 		if (!sband)
4939 			continue;
4940 
4941 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
4942 			if (sband->channels[ch].center_freq == freq)
4943 				goto exit;
4944 	}
4945 
4946 exit:
4947 	return idx;
4948 }
4949 
4950 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
4951 				    u32 len, struct wmi_chan_info_event *ch_info_ev)
4952 {
4953 	const void **tb;
4954 	const struct wmi_chan_info_event *ev;
4955 	int ret;
4956 
4957 	tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
4958 	if (IS_ERR(tb)) {
4959 		ret = PTR_ERR(tb);
4960 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
4961 		return ret;
4962 	}
4963 
4964 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
4965 	if (!ev) {
4966 		ath12k_warn(ab, "failed to fetch chan info ev");
4967 		kfree(tb);
4968 		return -EPROTO;
4969 	}
4970 
4971 	ch_info_ev->err_code = ev->err_code;
4972 	ch_info_ev->freq = ev->freq;
4973 	ch_info_ev->cmd_flags = ev->cmd_flags;
4974 	ch_info_ev->noise_floor = ev->noise_floor;
4975 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
4976 	ch_info_ev->cycle_count = ev->cycle_count;
4977 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
4978 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
4979 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
4980 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
4981 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
4982 	ch_info_ev->vdev_id = ev->vdev_id;
4983 
4984 	kfree(tb);
4985 	return 0;
4986 }
4987 
4988 static int
4989 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
4990 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
4991 {
4992 	const void **tb;
4993 	const struct wmi_pdev_bss_chan_info_event *ev;
4994 	int ret;
4995 
4996 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
4997 	if (IS_ERR(tb)) {
4998 		ret = PTR_ERR(tb);
4999 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5000 		return ret;
5001 	}
5002 
5003 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
5004 	if (!ev) {
5005 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
5006 		kfree(tb);
5007 		return -EPROTO;
5008 	}
5009 
5010 	bss_ch_info_ev->pdev_id = ev->pdev_id;
5011 	bss_ch_info_ev->freq = ev->freq;
5012 	bss_ch_info_ev->noise_floor = ev->noise_floor;
5013 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
5014 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
5015 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
5016 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
5017 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
5018 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
5019 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
5020 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
5021 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
5022 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
5023 
5024 	kfree(tb);
5025 	return 0;
5026 }
5027 
5028 static int
5029 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
5030 				      struct wmi_vdev_install_key_complete_arg *arg)
5031 {
5032 	const void **tb;
5033 	const struct wmi_vdev_install_key_compl_event *ev;
5034 	int ret;
5035 
5036 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5037 	if (IS_ERR(tb)) {
5038 		ret = PTR_ERR(tb);
5039 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5040 		return ret;
5041 	}
5042 
5043 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
5044 	if (!ev) {
5045 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
5046 		kfree(tb);
5047 		return -EPROTO;
5048 	}
5049 
5050 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
5051 	arg->macaddr = ev->peer_macaddr.addr;
5052 	arg->key_idx = le32_to_cpu(ev->key_idx);
5053 	arg->key_flags = le32_to_cpu(ev->key_flags);
5054 	arg->status = le32_to_cpu(ev->status);
5055 
5056 	kfree(tb);
5057 	return 0;
5058 }
5059 
5060 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
5061 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
5062 {
5063 	const void **tb;
5064 	const struct wmi_peer_assoc_conf_event *ev;
5065 	int ret;
5066 
5067 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5068 	if (IS_ERR(tb)) {
5069 		ret = PTR_ERR(tb);
5070 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5071 		return ret;
5072 	}
5073 
5074 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
5075 	if (!ev) {
5076 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
5077 		kfree(tb);
5078 		return -EPROTO;
5079 	}
5080 
5081 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
5082 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
5083 
5084 	kfree(tb);
5085 	return 0;
5086 }
5087 
5088 static int
5089 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
5090 			 u32 len, struct wmi_pdev_temperature_event *ev)
5091 {
5092 	const void **tb;
	const struct wmi_pdev_temperature_event *temp_ev;
5093 	int ret;
5094 
5095 	tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
5096 	if (IS_ERR(tb)) {
5097 		ret = PTR_ERR(tb);
5098 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5099 		return ret;
5100 	}
5101 
5102 	temp_ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
5103 	if (!temp_ev) {
5104 		ath12k_warn(ab, "failed to fetch pdev temp ev");
5105 		kfree(tb);
5106 		return -EPROTO;
5107 	}
5108 
	/* Copy the parsed fixed params back to the caller's structure */
	ev->temp = temp_ev->temp;
	ev->pdev_id = temp_ev->pdev_id;

5109 	kfree(tb);
5110 	return 0;
5111 }
5112 
5113 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
5114 {
5115 	/* try to send pending beacons first. they take priority */
5116 	wake_up(&ab->wmi_ab.tx_credits_wq);
5117 }
5118 
5119 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
5120 				       struct sk_buff *skb)
5121 {
5122 	dev_kfree_skb(skb);
5123 }
5124 
5125 static bool ath12k_reg_is_world_alpha(char *alpha)
5126 {
5127 	return alpha[0] == '0' && alpha[1] == '0';
5128 }
5129 
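/* Handle a regulatory channel list (CC ext) event: extract the reg info,
 * build a regdomain from it and either queue a regd update (when mac is
 * already registered) or store it as the default regd to be applied
 * during mac registration.
 */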
5130 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
5131 {
5132 	struct ath12k_reg_info *reg_info = NULL;
5133 	struct ieee80211_regdomain *regd = NULL;
5134 	bool intersect = false;
5135 	int ret = 0, pdev_idx, i, j;
5136 	struct ath12k *ar;
5137 
5138 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
5139 	if (!reg_info) {
5140 		ret = -ENOMEM;
5141 		goto fallback;
5142 	}
5143 
5144 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
5145 
5146 	if (ret) {
5147 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
5148 		goto fallback;
5149 	}
5150 
5151 	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
5152 		/* In case of failure to set the requested country,
5153 		 * fw retains the current regd. Print a failure message
5154 		 * and return from here.
5155 		 */
5156 		ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
5157 		goto mem_free;
5158 	}
5159 
5160 	pdev_idx = reg_info->phy_id;
5161 
5162 	if (pdev_idx >= ab->num_radios) {
5163 		/* Process the event for phy0 only if single_pdev_only
5164 		 * is true. If pdev_idx is valid but not 0, discard the
5165 		 * event. Otherwise, it goes to fallback.
5166 		 */
5167 		if (ab->hw_params->single_pdev_only &&
5168 		    pdev_idx < ab->hw_params->num_rxmda_per_pdev)
5169 			goto mem_free;
5170 		else
5171 			goto fallback;
5172 	}
5173 
5174 	/* Avoid multiple overwrites to default regd, during core
5175 	 * stop-start after mac registration.
5176 	 */
5177 	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
5178 	    !memcmp(ab->default_regd[pdev_idx]->alpha2,
5179 		    reg_info->alpha2, 2))
5180 		goto mem_free;
5181 
5182 	/* Intersect new rules with default regd if a new country setting was
5183 	 * requested, i.e. a default regd was already set during initialization
5184 	 * and the regd coming from this event has a valid country info.
5185 	 */
5186 	if (ab->default_regd[pdev_idx] &&
5187 	    !ath12k_reg_is_world_alpha((char *)
5188 		ab->default_regd[pdev_idx]->alpha2) &&
5189 	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
5190 		intersect = true;
5191 
5192 	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
5193 	if (!regd) {
5194 		ath12k_warn(ab, "failed to build regd from reg_info\n");
5195 		goto fallback;
5196 	}
5197 
5198 	spin_lock(&ab->base_lock);
5199 	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
5200 		/* Once mac is registered, ar is valid and all CC events from
5201 		 * fw are currently considered to be received due to user
5202 		 * requests.
5203 		 * Free the previously built regd before assigning the newly
5204 		 * generated regd to ar. NULL pointers are handled safely by
5205 		 * kfree itself.
5206 		 */
5207 		ar = ab->pdevs[pdev_idx].ar;
5208 		kfree(ab->new_regd[pdev_idx]);
5209 		ab->new_regd[pdev_idx] = regd;
5210 		ieee80211_queue_work(ar->hw, &ar->regd_update_work);
5211 	} else {
5212 		/* Multiple events for the same *ar are not expected. But we
5213 		 * can still clear any previously stored default_regd if we
5214 		 * receive this event for the same radio by mistake.
5215 		 * NULL pointers are handled safely by kfree itself.
5216 		 */
5217 		kfree(ab->default_regd[pdev_idx]);
5218 		/* This regd would be applied during mac registration */
5219 		ab->default_regd[pdev_idx] = regd;
5220 	}
5221 	ab->dfs_region = reg_info->dfs_region;
5222 	spin_unlock(&ab->base_lock);
5223 
5224 	goto mem_free;
5225 
5226 fallback:
5227 	/* Fall back to the older reg (by sending the previous country setting
5228 	 * again) if fw has succeeded and we failed to process here.
5229 	 * The Regdomain should be uniform across driver and fw. Since the
5230 	 * FW has processed the command and sent a success status, we expect
5231 	 * this function to succeed as well. If it doesn't, CTRY needs to be
5232 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
5233 	 */
5234 	/* TODO: This is rare, but still should also be handled */
5235 	WARN_ON(1);
5236 mem_free:
5237 	if (reg_info) {
5238 		kfree(reg_info->reg_rules_2g_ptr);
5239 		kfree(reg_info->reg_rules_5g_ptr);
5240 		if (reg_info->is_ext_reg_event) {
5241 			for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
5242 				kfree(reg_info->reg_rules_6g_ap_ptr[i]);
5243 
5244 			for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
5245 				for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
5246 					kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
5247 		}
5248 		kfree(reg_info);
5249 	}
5250 	return ret;
5251 }
5252 
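/* TLV iterator callback for the WMI ready event: records the wlan init
 * status, the base MAC address and any extra per-pdev MAC addresses.
 */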
5253 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
5254 				const void *ptr, void *data)
5255 {
5256 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
5257 	struct wmi_ready_event fixed_param;
5258 	struct ath12k_wmi_mac_addr_params *addr_list;
5259 	struct ath12k_pdev *pdev;
5260 	u32 num_mac_addr;
5261 	int i;
5262 
5263 	switch (tag) {
5264 	case WMI_TAG_READY_EVENT:
5265 		memset(&fixed_param, 0, sizeof(fixed_param));
5266 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
5267 		       min_t(u16, sizeof(fixed_param), len));
5268 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
5269 		rdy_parse->num_extra_mac_addr =
5270 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
5271 
5272 		ether_addr_copy(ab->mac_addr,
5273 				fixed_param.ready_event_min.mac_addr.addr);
5274 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
5275 		ab->wmi_ready = true;
5276 		break;
5277 	case WMI_TAG_ARRAY_FIXED_STRUCT:
5278 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
5279 		num_mac_addr = rdy_parse->num_extra_mac_addr;
5280 
5281 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
5282 			break;
5283 
5284 		for (i = 0; i < ab->num_radios; i++) {
5285 			pdev = &ab->pdevs[i];
5286 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
5287 		}
5288 		ab->pdevs_macaddr_valid = true;
5289 		break;
5290 	default:
5291 		break;
5292 	}
5293 
5294 	return 0;
5295 }
5296 
5297 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
5298 {
5299 	struct ath12k_wmi_rdy_parse rdy_parse = { };
5300 	int ret;
5301 
5302 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5303 				  ath12k_wmi_rdy_parse, &rdy_parse);
5304 	if (ret) {
5305 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
5306 		return ret;
5307 	}
5308 
5309 	complete(&ab->wmi_ab.unified_ready);
5310 	return 0;
5311 }
5312 
5313 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5314 {
5315 	struct wmi_peer_delete_resp_event peer_del_resp;
5316 	struct ath12k *ar;
5317 
5318 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
5319 		ath12k_warn(ab, "failed to extract peer delete resp");
5320 		return;
5321 	}
5322 
5323 	rcu_read_lock();
5324 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
5325 	if (!ar) {
5326 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
5327 			    peer_del_resp.vdev_id);
5328 		rcu_read_unlock();
5329 		return;
5330 	}
5331 
5332 	complete(&ar->peer_delete_done);
5333 	rcu_read_unlock();
5334 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
5335 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
5336 }
5337 
5338 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
5339 					  struct sk_buff *skb)
5340 {
5341 	struct ath12k *ar;
5342 	u32 vdev_id = 0;
5343 
5344 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
5345 		ath12k_warn(ab, "failed to extract vdev delete resp");
5346 		return;
5347 	}
5348 
5349 	rcu_read_lock();
5350 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
5351 	if (!ar) {
5352 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
5353 			    vdev_id);
5354 		rcu_read_unlock();
5355 		return;
5356 	}
5357 
5358 	complete(&ar->vdev_delete_done);
5359 
5360 	rcu_read_unlock();
5361 
5362 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
5363 		   vdev_id);
5364 }
5365 
5366 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
5367 {
5368 	switch (vdev_resp_status) {
5369 	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
5370 		return "invalid vdev id";
5371 	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
5372 		return "not supported";
5373 	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
5374 		return "dfs violation";
5375 	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
5376 		return "invalid regdomain";
5377 	default:
5378 		return "unknown";
5379 	}
5380 }
5381 
5382 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
5383 {
5384 	struct wmi_vdev_start_resp_event vdev_start_resp;
5385 	struct ath12k *ar;
5386 	u32 status;
5387 
5388 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
5389 		ath12k_warn(ab, "failed to extract vdev start resp");
5390 		return;
5391 	}
5392 
5393 	rcu_read_lock();
5394 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
5395 	if (!ar) {
5396 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
5397 			    vdev_start_resp.vdev_id);
5398 		rcu_read_unlock();
5399 		return;
5400 	}
5401 
5402 	ar->last_wmi_vdev_start_status = 0;
5403 
5404 	status = le32_to_cpu(vdev_start_resp.status);
5405 
5406 	if (WARN_ON_ONCE(status)) {
5407 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
5408 			    status, ath12k_wmi_vdev_resp_print(status));
5409 		ar->last_wmi_vdev_start_status = status;
5410 	}
5411 
5412 	complete(&ar->vdev_setup_done);
5413 
5414 	rcu_read_unlock();
5415 
5416 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
5417 		   vdev_start_resp.vdev_id);
5418 }
5419 
5420 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
5421 {
5422 	u32 vdev_id, tx_status;
5423 
5424 	if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
5425 					 &vdev_id, &tx_status) != 0) {
5426 		ath12k_warn(ab, "failed to extract bcn tx status");
5427 		return;
5428 	}
5429 }
5430 
5431 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
5432 {
5433 	struct ath12k *ar;
5434 	u32 vdev_id = 0;
5435 
5436 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
5437 		ath12k_warn(ab, "failed to extract vdev stopped event");
5438 		return;
5439 	}
5440 
5441 	rcu_read_lock();
5442 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
5443 	if (!ar) {
5444 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
5445 			    vdev_id);
5446 		rcu_read_unlock();
5447 		return;
5448 	}
5449 
5450 	complete(&ar->vdev_setup_done);
5451 
5452 	rcu_read_unlock();
5453 
5454 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
5455 }
5456 
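/* Handle a WMI management rx event: fill in the mac80211 rx status
 * (band, frequency, signal, rate) and pass the frame to mac80211.
 * This handler takes ownership of the skb.
 */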
5457 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
5458 {
5459 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
5460 	struct ath12k *ar;
5461 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5462 	struct ieee80211_hdr *hdr;
5463 	u16 fc;
5464 	struct ieee80211_supported_band *sband;
5465 
5466 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
5467 		ath12k_warn(ab, "failed to extract mgmt rx event");
5468 		dev_kfree_skb(skb);
5469 		return;
5470 	}
5471 
5472 	memset(status, 0, sizeof(*status));
5473 
5474 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
5475 		   rx_ev.status);
5476 
5477 	rcu_read_lock();
5478 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
5479 
5480 	if (!ar) {
5481 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
5482 			    rx_ev.pdev_id);
5483 		dev_kfree_skb(skb);
5484 		goto exit;
5485 	}
5486 
5487 	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
5488 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
5489 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
5490 			     WMI_RX_STATUS_ERR_CRC))) {
5491 		dev_kfree_skb(skb);
5492 		goto exit;
5493 	}
5494 
5495 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
5496 		status->flag |= RX_FLAG_MMIC_ERROR;
5497 
5498 	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
5499 		status->band = NL80211_BAND_6GHZ;
5500 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
5501 		status->band = NL80211_BAND_2GHZ;
5502 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
5503 		status->band = NL80211_BAND_5GHZ;
5504 	} else {
5505 		/* Shouldn't happen unless list of advertised channels to
5506 		 * mac80211 has been changed.
5507 		 */
5508 		WARN_ON_ONCE(1);
5509 		dev_kfree_skb(skb);
5510 		goto exit;
5511 	}
5512 
5513 	if (rx_ev.phy_mode == MODE_11B &&
5514 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
5515 		ath12k_dbg(ab, ATH12K_DBG_WMI,
5516 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
5517 
5518 	sband = &ar->mac.sbands[status->band];
5519 
5520 	status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
5521 						      status->band);
5522 	status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
5523 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
5524 
5525 	hdr = (struct ieee80211_hdr *)skb->data;
5526 	fc = le16_to_cpu(hdr->frame_control);
5527 
5528 	/* Firmware is guaranteed to report all essential management frames via
5529 	 * WMI while it can deliver some extra via HTT. Since there can be
5530 	 * duplicates split the reporting wrt monitor/sniffing.
5531 	 * duplicates, split the reporting wrt monitor/sniffing.
5532 	status->flag |= RX_FLAG_SKIP_MONITOR;
5533 
5534 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
5535 	 * including group privacy action frames.
5536 	 */
5537 	if (ieee80211_has_protected(hdr->frame_control)) {
5538 		status->flag |= RX_FLAG_DECRYPTED;
5539 
5540 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
5541 			status->flag |= RX_FLAG_IV_STRIPPED |
5542 					RX_FLAG_MMIC_STRIPPED;
5543 			hdr->frame_control = __cpu_to_le16(fc &
5544 					     ~IEEE80211_FCTL_PROTECTED);
5545 		}
5546 	}
5547 
5548 	/* TODO: Pending handle beacon implementation
5549 	 *if (ieee80211_is_beacon(hdr->frame_control))
5550 	 *	ath12k_mac_handle_beacon(ar, skb);
5551 	 */
5552 
5553 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
5554 		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
5555 		   skb, skb->len,
5556 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
5557 
5558 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
5559 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
5560 		   status->freq, status->band, status->signal,
5561 		   status->rate_idx);
5562 
5563 	ieee80211_rx_ni(ar->hw, skb);
5564 
5565 exit:
5566 	rcu_read_unlock();
5567 }
5568 
5569 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
5570 {
5571 	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
5572 	struct ath12k *ar;
5573 
5574 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
5575 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
5576 		return;
5577 	}
5578 
5579 	rcu_read_lock();
5580 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
5581 	if (!ar) {
5582 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
5583 			    tx_compl_param.pdev_id);
5584 		goto exit;
5585 	}
5586 
5587 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
5588 				 le32_to_cpu(tx_compl_param.status));
5589 
5590 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
5591 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
5592 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
5593 		   tx_compl_param.status);
5594 
5595 exit:
5596 	rcu_read_unlock();
5597 }
5598 
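/* Find the ath12k instance whose scan is ABORTING on the given vdev;
 * used when a cancelled scan's vdev can no longer be looked up among
 * the active interfaces.
 */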
5599 static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab,
5600 						  u32 vdev_id)
5601 {
5602 	int i;
5603 	struct ath12k_pdev *pdev;
5604 	struct ath12k *ar;
5605 
5606 	for (i = 0; i < ab->num_radios; i++) {
5607 		pdev = rcu_dereference(ab->pdevs_active[i]);
5608 		if (pdev && pdev->ar) {
5609 			ar = pdev->ar;
5610 
5611 			spin_lock_bh(&ar->data_lock);
5612 			if (ar->scan.state == ATH12K_SCAN_ABORTING &&
5613 			    ar->scan.vdev_id == vdev_id) {
5614 				spin_unlock_bh(&ar->data_lock);
5615 				return ar;
5616 			}
5617 			spin_unlock_bh(&ar->data_lock);
5618 		}
5619 	}
5620 	return NULL;
5621 }
5622 
5623 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
5624 {
5625 	struct ath12k *ar;
5626 	struct wmi_scan_event scan_ev = {0};
5627 
5628 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
5629 		ath12k_warn(ab, "failed to extract scan event");
5630 		return;
5631 	}
5632 
5633 	rcu_read_lock();
5634 
5635 	/* In case the scan was cancelled, e.g. during interface teardown,
5636 	 * the interface will not be found among the active interfaces.
5637 	 * Rather, in such scenarios, iterate over the active pdevs to
5638 	 * find the 'ar' whose scan is ABORTING and whose aborting scan's
5639 	 * vdev id matches this event info.
5640 	 */
5641 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
5642 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED)
5643 		ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id));
5644 	else
5645 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
5646 
5647 	if (!ar) {
5648 		ath12k_warn(ab, "Received scan event for unknown vdev");
5649 		rcu_read_unlock();
5650 		return;
5651 	}
5652 
5653 	spin_lock_bh(&ar->data_lock);
5654 
5655 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5656 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
5657 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
5658 						  le32_to_cpu(scan_ev.reason)),
5659 		   le32_to_cpu(scan_ev.event_type),
5660 		   le32_to_cpu(scan_ev.reason),
5661 		   le32_to_cpu(scan_ev.channel_freq),
5662 		   le32_to_cpu(scan_ev.scan_req_id),
5663 		   le32_to_cpu(scan_ev.scan_id),
5664 		   le32_to_cpu(scan_ev.vdev_id),
5665 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
5666 
5667 	switch (le32_to_cpu(scan_ev.event_type)) {
5668 	case WMI_SCAN_EVENT_STARTED:
5669 		ath12k_wmi_event_scan_started(ar);
5670 		break;
5671 	case WMI_SCAN_EVENT_COMPLETED:
5672 		ath12k_wmi_event_scan_completed(ar);
5673 		break;
5674 	case WMI_SCAN_EVENT_BSS_CHANNEL:
5675 		ath12k_wmi_event_scan_bss_chan(ar);
5676 		break;
5677 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
5678 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
5679 		break;
5680 	case WMI_SCAN_EVENT_START_FAILED:
5681 		ath12k_warn(ab, "received scan start failure event\n");
5682 		ath12k_wmi_event_scan_start_failed(ar);
5683 		break;
5684 	case WMI_SCAN_EVENT_DEQUEUED:
5685 	case WMI_SCAN_EVENT_PREEMPTED:
5686 	case WMI_SCAN_EVENT_RESTARTED:
5687 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
5688 	default:
5689 		break;
5690 	}
5691 
5692 	spin_unlock_bh(&ar->data_lock);
5693 
5694 	rcu_read_unlock();
5695 }
5696 
5697 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
5698 {
5699 	struct wmi_peer_sta_kickout_arg arg = {};
5700 	struct ieee80211_sta *sta;
5701 	struct ath12k_peer *peer;
5702 	struct ath12k *ar;
5703 
5704 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
5705 		ath12k_warn(ab, "failed to extract peer sta kickout event");
5706 		return;
5707 	}
5708 
5709 	rcu_read_lock();
5710 
5711 	spin_lock_bh(&ab->base_lock);
5712 
5713 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
5714 
5715 	if (!peer) {
5716 		ath12k_warn(ab, "peer not found %pM\n",
5717 			    arg.mac_addr);
5718 		goto exit;
5719 	}
5720 
5721 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
5722 	if (!ar) {
5723 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
5724 			    peer->vdev_id);
5725 		goto exit;
5726 	}
5727 
5728 	sta = ieee80211_find_sta_by_ifaddr(ar->hw,
5729 					   arg.mac_addr, NULL);
5730 	if (!sta) {
5731 		ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
5732 			    arg.mac_addr);
5733 		goto exit;
5734 	}
5735 
5736 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
5737 		   arg.mac_addr);
5738 
5739 	ieee80211_report_low_ack(sta, 10);
5740 
5741 exit:
5742 	spin_unlock_bh(&ab->base_lock);
5743 	rcu_read_unlock();
5744 }
5745 
5746 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
5747 {
5748 	struct wmi_roam_event roam_ev = {};
5749 	struct ath12k *ar;
5750 
5751 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
5752 		ath12k_warn(ab, "failed to extract roam event");
5753 		return;
5754 	}
5755 
5756 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5757 		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
5758 		   roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
5759 
5760 	rcu_read_lock();
5761 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
5762 	if (!ar) {
5763 		ath12k_warn(ab, "invalid vdev id in roam ev %d",
5764 			    roam_ev.vdev_id);
5765 		rcu_read_unlock();
5766 		return;
5767 	}
5768 
5769 	if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
5770 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
5771 			    roam_ev.reason, roam_ev.vdev_id);
5772 
5773 	switch (le32_to_cpu(roam_ev.reason)) {
5774 	case WMI_ROAM_REASON_BEACON_MISS:
5775 		/* TODO: Pending beacon miss and connection_loss_work
5776 		 * implementation
5777 		 * ath12k_mac_handle_beacon_miss(ar, vdev_id);
5778 		 */
5779 		break;
5780 	case WMI_ROAM_REASON_BETTER_AP:
5781 	case WMI_ROAM_REASON_LOW_RSSI:
5782 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
5783 	case WMI_ROAM_REASON_HO_FAILED:
5784 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
5785 			    roam_ev.reason, roam_ev.vdev_id);
5786 		break;
5787 	}
5788 
5789 	rcu_read_unlock();
5790 }
5791 
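/* Handle a WMI channel info event: update the per-channel survey info
 * (noise floor, channel active and busy time) for an ongoing scan.
 */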
5792 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
5793 {
5794 	struct wmi_chan_info_event ch_info_ev = {0};
5795 	struct ath12k *ar;
5796 	struct survey_info *survey;
5797 	int idx;
5798 	/* HW channel counters frequency value in hertz */
5799 	u32 cc_freq_hz = ab->cc_freq_hz;
5800 
5801 	if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
5802 		ath12k_warn(ab, "failed to extract chan info event");
5803 		return;
5804 	}
5805 
5806 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5807 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
5808 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
5809 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
5810 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
5811 		   ch_info_ev.mac_clk_mhz);
5812 
5813 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
5814 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
5815 		return;
5816 	}
5817 
5818 	rcu_read_lock();
5819 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
5820 	if (!ar) {
5821 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
5822 			    ch_info_ev.vdev_id);
5823 		rcu_read_unlock();
5824 		return;
5825 	}
5826 	spin_lock_bh(&ar->data_lock);
5827 
5828 	switch (ar->scan.state) {
5829 	case ATH12K_SCAN_IDLE:
5830 	case ATH12K_SCAN_STARTING:
5831 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
5832 		goto exit;
5833 	case ATH12K_SCAN_RUNNING:
5834 	case ATH12K_SCAN_ABORTING:
5835 		break;
5836 	}
5837 
5838 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
5839 	if (idx >= ARRAY_SIZE(ar->survey)) {
5840 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
5841 			    ch_info_ev.freq, idx);
5842 		goto exit;
5843 	}
5844 
5845 	/* If FW provides the MAC clock frequency in MHz, override the
5846 	 * initialized HW channel counters frequency value
5847 	 */
5848 	if (ch_info_ev.mac_clk_mhz)
5849 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
5850 
5851 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_START_RESP) {
5852 		survey = &ar->survey[idx];
5853 		memset(survey, 0, sizeof(*survey));
5854 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
5855 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
5856 				 SURVEY_INFO_TIME_BUSY;
5857 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
5858 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
5859 					    cc_freq_hz);
5860 	}
5861 exit:
5862 	spin_unlock_bh(&ar->data_lock);
5863 	rcu_read_unlock();
5864 }
5865 
5866 static void
5867 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
5868 {
5869 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
5870 	struct survey_info *survey;
5871 	struct ath12k *ar;
5872 	u32 cc_freq_hz = ab->cc_freq_hz;
5873 	u64 busy, total, tx, rx, rx_bss;
5874 	int idx;
5875 
5876 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
5877 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
5878 		return;
5879 	}
5880 
5881 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
5882 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
5883 
5884 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
5885 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
5886 
5887 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
5888 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
5889 
5890 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
5891 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
5892 
5893 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
5894 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
5895 
5896 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5897 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
5898 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
5899 		   bss_ch_info_ev.noise_floor, busy, total,
5900 		   tx, rx, rx_bss);
5901 
5902 	rcu_read_lock();
5903 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
5904 
5905 	if (!ar) {
5906 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
5907 			    bss_ch_info_ev.pdev_id);
5908 		rcu_read_unlock();
5909 		return;
5910 	}
5911 
5912 	spin_lock_bh(&ar->data_lock);
5913 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
5914 	if (idx >= ARRAY_SIZE(ar->survey)) {
5915 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
5916 			    bss_ch_info_ev.freq, idx);
5917 		goto exit;
5918 	}
5919 
5920 	survey = &ar->survey[idx];
5921 
5922 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
5923 	survey->time      = div_u64(total, cc_freq_hz);
5924 	survey->time_busy = div_u64(busy, cc_freq_hz);
5925 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
5926 	survey->time_tx   = div_u64(tx, cc_freq_hz);
5927 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
5928 			     SURVEY_INFO_TIME |
5929 			     SURVEY_INFO_TIME_BUSY |
5930 			     SURVEY_INFO_TIME_RX |
5931 			     SURVEY_INFO_TIME_TX);
5932 exit:
5933 	spin_unlock_bh(&ar->data_lock);
5934 	complete(&ar->bss_survey_done);
5935 
5936 	rcu_read_unlock();
5937 }
5938 
5939 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
5940 						struct sk_buff *skb)
5941 {
5942 	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
5943 	struct ath12k *ar;
5944 
5945 	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
5946 		ath12k_warn(ab, "failed to extract install key compl event");
5947 		return;
5948 	}
5949 
5950 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5951 		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
5952 		   install_key_compl.key_idx, install_key_compl.key_flags,
5953 		   install_key_compl.macaddr, install_key_compl.status);
5954 
5955 	rcu_read_lock();
5956 	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
5957 	if (!ar) {
5958 		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
5959 			    install_key_compl.vdev_id);
5960 		rcu_read_unlock();
5961 		return;
5962 	}
5963 
5964 	ar->install_key_status = 0;
5965 
5966 	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
5967 		ath12k_warn(ab, "install key failed for %pM status %d\n",
5968 			    install_key_compl.macaddr, install_key_compl.status);
5969 		ar->install_key_status = install_key_compl.status;
5970 	}
5971 
5972 	complete(&ar->install_key_done);
5973 	rcu_read_unlock();
5974 }
5975 
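/* Handle a WMI service available event: set the bits of the extended
 * service bitmap (services beyond WMI_MAX_SERVICE) in ab->wmi_ab.svc_map.
 */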
5976 static void ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
5977 {
5978 	const void **tb;
5979 	const struct wmi_service_available_event *ev;
5980 	int ret;
5981 	int i, j;
5982 
5983 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
5984 	if (IS_ERR(tb)) {
5985 		ret = PTR_ERR(tb);
5986 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5987 		return;
5988 	}
5989 
5990 	ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
5991 	if (!ev) {
5992 		ath12k_warn(ab, "failed to fetch svc available ev");
5993 		kfree(tb);
5994 		return;
5995 	}
5996 
5997 	/* TODO: Use wmi_service_segment_offset information to get the service
5998 	 * especially when more services are advertised in multiple service
5999 	 * available events.
6000 	 */
6001 	for (i = 0, j = WMI_MAX_SERVICE;
6002 	     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
6003 	     i++) {
6004 		do {
6005 			if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
6006 			    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
6007 				set_bit(j, ab->wmi_ab.svc_map);
6008 		} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
6009 	}
6010 
6011 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6012 		   "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
6013 		   ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
6014 		   ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
6015 
6016 	kfree(tb);
6017 }
6018 
6019 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
6020 {
6021 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
6022 	struct ath12k *ar;
6023 
6024 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
6025 		ath12k_warn(ab, "failed to extract peer assoc conf event");
6026 		return;
6027 	}
6028 
6029 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6030 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
6031 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
6032 
6033 	rcu_read_lock();
6034 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
6035 
6036 	if (!ar) {
6037 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
6038 			    peer_assoc_conf.vdev_id);
6039 		rcu_read_unlock();
6040 		return;
6041 	}
6042 
6043 	complete(&ar->peer_assoc_done);
6044 	rcu_read_unlock();
6045 }
6046 
6047 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
6048 {
6049 }
6050 
6051 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
6052  * is not part of the BDF CTL (Conformance test limits) table entries.
6053  */
6054 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
6055 						 struct sk_buff *skb)
6056 {
6057 	const void **tb;
6058 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
6059 	int ret;
6060 
6061 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
6062 	if (IS_ERR(tb)) {
6063 		ret = PTR_ERR(tb);
6064 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6065 		return;
6066 	}
6067 
6068 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
6069 	if (!ev) {
6070 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
6071 		kfree(tb);
6072 		return;
6073 	}
6074 
6075 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6076 		   "pdev ctl failsafe check ev status %d\n",
6077 		   ev->ctl_failsafe_status);
6078 
6079 	/* If ctl_failsafe_status is set to 1, FW caps the transmit power
6080 	 * at 10 dBm; otherwise the CTL power entry in the BDF is picked up.
6081 	 */
6082 	if (ev->ctl_failsafe_status != 0)
6083 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
6084 			    ev->ctl_failsafe_status);
6085 
6086 	kfree(tb);
6087 }
6088 
6089 static void
6090 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
6091 					  const struct ath12k_wmi_pdev_csa_event *ev,
6092 					  const u32 *vdev_ids)
6093 {
6094 	int i;
6095 	struct ath12k_vif *arvif;
6096 
6097 	/* Finish CSA once the switch count reaches zero */
6098 	if (ev->current_switch_count)
6099 		return;
6100 
6101 	rcu_read_lock();
6102 	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
6103 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
6104 
6105 		if (!arvif) {
6106 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
6107 				    vdev_ids[i]);
6108 			continue;
6109 		}
6110 
6111 		if (arvif->is_up && arvif->vif->bss_conf.csa_active)
6112 			ieee80211_csa_finish(arvif->vif);
6113 	}
6114 	rcu_read_unlock();
6115 }
6116 
6117 static void
6118 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
6119 					      struct sk_buff *skb)
6120 {
6121 	const void **tb;
6122 	const struct ath12k_wmi_pdev_csa_event *ev;
6123 	const u32 *vdev_ids;
6124 	int ret;
6125 
6126 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
6127 	if (IS_ERR(tb)) {
6128 		ret = PTR_ERR(tb);
6129 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6130 		return;
6131 	}
6132 
6133 	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
6134 	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
6135 
6136 	if (!ev || !vdev_ids) {
6137 		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
6138 		kfree(tb);
6139 		return;
6140 	}
6141 
6142 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6143 		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
6144 		   ev->current_switch_count, ev->pdev_id,
6145 		   ev->num_vdevs);
6146 
6147 	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
6148 
6149 	kfree(tb);
6150 }
6151 
6152 static void
6153 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
6154 {
6155 	const void **tb;
6156 	const struct ath12k_wmi_pdev_radar_event *ev;
6157 	struct ath12k *ar;
6158 	int ret;
6159 
6160 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
6161 	if (IS_ERR(tb)) {
6162 		ret = PTR_ERR(tb);
6163 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6164 		return;
6165 	}
6166 
6167 	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
6168 
6169 	if (!ev) {
6170 		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
6171 		kfree(tb);
6172 		return;
6173 	}
6174 
6175 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6176 		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
6177 		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
6178 		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
6179 		   ev->freq_offset, ev->sidx);
6180 
6181 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
6182 
6183 	if (!ar) {
6184 		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
6185 			    ev->pdev_id);
6186 		goto exit;
6187 	}
6188 
6189 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
6190 		   ev->pdev_id);
6191 
6192 	if (ar->dfs_block_radar_events)
6193 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
6194 	else
6195 		ieee80211_radar_detected(ar->hw);
6196 
6197 exit:
6198 	kfree(tb);
6199 }
6200 
6201 static void
6202 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
6203 				  struct sk_buff *skb)
6204 {
6205 	struct ath12k *ar;
6206 	struct wmi_pdev_temperature_event ev = {0};
6207 
6208 	if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
6209 		ath12k_warn(ab, "failed to extract pdev temperature event");
6210 		return;
6211 	}
6212 
6213 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6214 		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
6215 
6216 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
6217 	if (!ar) {
6218 		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
6219 		return;
6220 	}
6221 }
6222 
6223 static void ath12k_fils_discovery_event(struct ath12k_base *ab,
6224 					struct sk_buff *skb)
6225 {
6226 	const void **tb;
6227 	const struct wmi_fils_discovery_event *ev;
6228 	int ret;
6229 
6230 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
6231 	if (IS_ERR(tb)) {
6232 		ret = PTR_ERR(tb);
6233 		ath12k_warn(ab,
6234 			    "failed to parse FILS discovery event tlv %d\n",
6235 			    ret);
6236 		return;
6237 	}
6238 
6239 	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
6240 	if (!ev) {
6241 		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
6242 		kfree(tb);
6243 		return;
6244 	}
6245 
6246 	ath12k_warn(ab,
6247 		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
6248 		    ev->vdev_id, ev->fils_tt, ev->tbtt);
6249 
6250 	kfree(tb);
6251 }
6252 
6253 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
6254 					      struct sk_buff *skb)
6255 {
6256 	const void **tb;
6257 	const struct wmi_probe_resp_tx_status_event *ev;
6258 	int ret;
6259 
6260 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
6261 	if (IS_ERR(tb)) {
6262 		ret = PTR_ERR(tb);
6263 		ath12k_warn(ab,
6264 			    "failed to parse probe response transmission status event tlv: %d\n",
6265 			    ret);
6266 		return;
6267 	}
6268 
6269 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
6270 	if (!ev) {
6271 		ath12k_warn(ab,
6272 			    "failed to fetch probe response transmission status event");
6273 		kfree(tb);
6274 		return;
6275 	}
6276 
6277 	if (ev->tx_status)
6278 		ath12k_warn(ab,
6279 			    "Probe response transmission failed for vdev_id %u, status %u\n",
6280 			    ev->vdev_id, ev->tx_status);
6281 
6282 	kfree(tb);
6283 }
6284 
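/* HTC rx completion handler for the WMI control endpoint: strip the WMI
 * command header and dispatch the event to its handler. The skb is freed
 * here unless the handler takes ownership of it (e.g. mgmt rx).
 */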
6285 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
6286 {
6287 	struct wmi_cmd_hdr *cmd_hdr;
6288 	enum wmi_tlv_event_id id;
6289 
6290 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
6291 	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
6292 
6293 	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
6294 		goto out;
6295 
6296 	switch (id) {
6297 		/* Process all the WMI events here */
6298 	case WMI_SERVICE_READY_EVENTID:
6299 		ath12k_service_ready_event(ab, skb);
6300 		break;
6301 	case WMI_SERVICE_READY_EXT_EVENTID:
6302 		ath12k_service_ready_ext_event(ab, skb);
6303 		break;
6304 	case WMI_SERVICE_READY_EXT2_EVENTID:
6305 		ath12k_service_ready_ext2_event(ab, skb);
6306 		break;
6307 	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
6308 		ath12k_reg_chan_list_event(ab, skb);
6309 		break;
6310 	case WMI_READY_EVENTID:
6311 		ath12k_ready_event(ab, skb);
6312 		break;
6313 	case WMI_PEER_DELETE_RESP_EVENTID:
6314 		ath12k_peer_delete_resp_event(ab, skb);
6315 		break;
6316 	case WMI_VDEV_START_RESP_EVENTID:
6317 		ath12k_vdev_start_resp_event(ab, skb);
6318 		break;
6319 	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
6320 		ath12k_bcn_tx_status_event(ab, skb);
6321 		break;
6322 	case WMI_VDEV_STOPPED_EVENTID:
6323 		ath12k_vdev_stopped_event(ab, skb);
6324 		break;
6325 	case WMI_MGMT_RX_EVENTID:
6326 		ath12k_mgmt_rx_event(ab, skb);
6327 		/* mgmt_rx_event() owns the skb now! */
6328 		return;
6329 	case WMI_MGMT_TX_COMPLETION_EVENTID:
6330 		ath12k_mgmt_tx_compl_event(ab, skb);
6331 		break;
6332 	case WMI_SCAN_EVENTID:
6333 		ath12k_scan_event(ab, skb);
6334 		break;
6335 	case WMI_PEER_STA_KICKOUT_EVENTID:
6336 		ath12k_peer_sta_kickout_event(ab, skb);
6337 		break;
6338 	case WMI_ROAM_EVENTID:
6339 		ath12k_roam_event(ab, skb);
6340 		break;
6341 	case WMI_CHAN_INFO_EVENTID:
6342 		ath12k_chan_info_event(ab, skb);
6343 		break;
6344 	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
6345 		ath12k_pdev_bss_chan_info_event(ab, skb);
6346 		break;
6347 	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
6348 		ath12k_vdev_install_key_compl_event(ab, skb);
6349 		break;
6350 	case WMI_SERVICE_AVAILABLE_EVENTID:
6351 		ath12k_service_available_event(ab, skb);
6352 		break;
6353 	case WMI_PEER_ASSOC_CONF_EVENTID:
6354 		ath12k_peer_assoc_conf_event(ab, skb);
6355 		break;
6356 	case WMI_UPDATE_STATS_EVENTID:
6357 		ath12k_update_stats_event(ab, skb);
6358 		break;
6359 	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
6360 		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
6361 		break;
6362 	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
6363 		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
6364 		break;
6365 	case WMI_PDEV_TEMPERATURE_EVENTID:
6366 		ath12k_wmi_pdev_temperature_event(ab, skb);
6367 		break;
6368 	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
6369 		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
6370 		break;
6371 	case WMI_HOST_FILS_DISCOVERY_EVENTID:
6372 		ath12k_fils_discovery_event(ab, skb);
6373 		break;
6374 	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
6375 		ath12k_probe_resp_tx_status_event(ab, skb);
6376 		break;
6377 	/* add Unsupported events here */
6378 	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
6379 	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
6380 	case WMI_TWT_ENABLE_EVENTID:
6381 	case WMI_TWT_DISABLE_EVENTID:
6382 	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
6383 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6384 			   "ignoring unsupported event 0x%x\n", id);
6385 		break;
6386 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
6387 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
6388 		break;
6389 	case WMI_VDEV_DELETE_RESP_EVENTID:
6390 		ath12k_vdev_delete_resp_event(ab, skb);
6391 		break;
6392 	/* TODO: Add remaining events */
6393 	default:
6394 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
6395 		break;
6396 	}
6397 
6398 out:
6399 	dev_kfree_skb(skb);
6400 }
6401 
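/* Connect the WMI control HTC service for the given pdev and record the
 * endpoint id and maximum message length for it.
 */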
6402 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
6403 					   u32 pdev_idx)
6404 {
6405 	int status;
6406 	u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
6407 			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
6408 			 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
6409 	struct ath12k_htc_svc_conn_req conn_req = {};
6410 	struct ath12k_htc_svc_conn_resp conn_resp = {};
6411 
6412 	/* these fields are the same for all service endpoints */
6413 	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
6414 	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
6415 	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
6416 
6417 	/* connect to control service */
6418 	conn_req.service_id = svc_id[pdev_idx];
6419 
6420 	status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
6421 	if (status) {
6422 		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
6423 			    status);
6424 		return status;
6425 	}
6426 
6427 	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
6428 	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
6429 	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
6430 
6431 	return 0;
6432 }
6433 
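/* Build and send a WMI unit test command: a fixed unit test TLV followed
 * by a u32 array TLV carrying the test arguments.
 */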
6434 static int
6435 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
6436 			      struct wmi_unit_test_cmd ut_cmd,
6437 			      u32 *test_args)
6438 {
6439 	struct ath12k_wmi_pdev *wmi = ar->wmi;
6440 	struct wmi_unit_test_cmd *cmd;
6441 	struct sk_buff *skb;
6442 	struct wmi_tlv *tlv;
6443 	void *ptr;
6444 	u32 *ut_cmd_args;
6445 	int buf_len, arg_len;
6446 	int ret;
6447 	int i;
6448 
6449 	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
6450 	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
6451 
6452 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
6453 	if (!skb)
6454 		return -ENOMEM;
6455 
6456 	cmd = (struct wmi_unit_test_cmd *)skb->data;
6457 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
6458 						 sizeof(ut_cmd));
6459 
6460 	cmd->vdev_id = ut_cmd.vdev_id;
6461 	cmd->module_id = ut_cmd.module_id;
6462 	cmd->num_args = ut_cmd.num_args;
6463 	cmd->diag_token = ut_cmd.diag_token;
6464 
6465 	ptr = skb->data + sizeof(ut_cmd);
6466 
6467 	tlv = ptr;
6468 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
6469 
6470 	ptr += TLV_HDR_SIZE;
6471 
6472 	ut_cmd_args = ptr;
6473 	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
6474 		ut_cmd_args[i] = test_args[i];
6475 
6476 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
6477 		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
6478 		   cmd->module_id, cmd->vdev_id, cmd->num_args,
6479 		   cmd->diag_token);
6480 
6481 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
6482 
6483 	if (ret) {
6484 		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
6485 			    ret);
6486 		dev_kfree_skb(skb);
6487 	}
6488 
6489 	return ret;
6490 }
6491 
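/* Trigger a simulated radar detection on the first started AP vdev by
 * sending a DFS unit test command to firmware.
 */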
6492 int ath12k_wmi_simulate_radar(struct ath12k *ar)
6493 {
6494 	struct ath12k_vif *arvif;
6495 	u32 dfs_args[DFS_MAX_TEST_ARGS];
6496 	struct wmi_unit_test_cmd wmi_ut;
6497 	bool arvif_found = false;
6498 
6499 	list_for_each_entry(arvif, &ar->arvifs, list) {
6500 		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
6501 			arvif_found = true;
6502 			break;
6503 		}
6504 	}
6505 
6506 	if (!arvif_found)
6507 		return -EINVAL;
6508 
6509 	dfs_args[DFS_TEST_CMDID] = 0;
6510 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
6511 	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
6512 	 * freq offset (b3 - b10) to the unit test. For simulation
6513 	 * purposes these can be set to 0, which is valid.
6514 	 */
6515 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
6516 
6517 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
6518 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
6519 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
6520 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
6521 
6522 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
6523 
6524 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
6525 }
6526 
6527 int ath12k_wmi_connect(struct ath12k_base *ab)
6528 {
6529 	u32 i;
6530 	u8 wmi_ep_count;
6531 
6532 	wmi_ep_count = ab->htc.wmi_ep_count;
6533 	if (wmi_ep_count > ab->hw_params->max_radios)
6534 		return -1;
6535 		return -EINVAL;
6536 	for (i = 0; i < wmi_ep_count; i++)
6537 		ath12k_connect_pdev_htc_service(ab, i);
6538 
6539 	return 0;
6540 }
6541 
6542 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
6543 {
6544 	if (WARN_ON(pdev_id >= MAX_RADIOS))
6545 		return;
6546 
6547 	/* TODO: Deinit any pdev specific wmi resource */
6548 }
6549 
6550 int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
6551 			   u8 pdev_id)
6552 {
6553 	struct ath12k_wmi_pdev *wmi_handle;
6554 
6555 	if (pdev_id >= ab->hw_params->max_radios)
6556 		return -EINVAL;
6557 
6558 	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
6559 
6560 	wmi_handle->wmi_ab = &ab->wmi_ab;
6561 
6562 	ab->wmi_ab.ab = ab;
6563 	/* TODO: Init remaining resource specific to pdev */
6564 
6565 	return 0;
6566 }
6567 
6568 int ath12k_wmi_attach(struct ath12k_base *ab)
6569 {
6570 	int ret;
6571 
6572 	ret = ath12k_wmi_pdev_attach(ab, 0);
6573 	if (ret)
6574 		return ret;
6575 
6576 	ab->wmi_ab.ab = ab;
6577 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
6578 
6579 	/* It's overwritten when service_ext_ready is handled */
6580 	if (ab->hw_params->single_pdev_only)
6581 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
6582 
6583 	/* TODO: Init remaining wmi soc resources required */
6584 	init_completion(&ab->wmi_ab.service_ready);
6585 	init_completion(&ab->wmi_ab.unified_ready);
6586 
6587 	return 0;
6588 }
6589 
6590 void ath12k_wmi_detach(struct ath12k_base *ab)
6591 {
6592 	int i;
6593 
6594 	/* TODO: Deinit wmi resource specific to SOC as required */
6595 
6596 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
6597 		ath12k_wmi_pdev_detach(ab, i);
6598 
6599 	ath12k_wmi_free_dbring_caps(ab);
6600 }
6601