1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include "core.h"
18 #include "debug.h"
19 #include "mac.h"
20 #include "hw.h"
21 #include "mac.h"
22 #include "wmi.h"
23 #include "wmi-ops.h"
24 #include "wmi-tlv.h"
25 #include "p2p.h"
26 #include "testmode.h"
27 
28 /***************/
29 /* TLV helpers */
/***************/
31 
32 struct wmi_tlv_policy {
33 	size_t min_len;
34 };
35 
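/* Minimum acceptable value length per TLV tag. The iterator below rejects
 * any TLV whose value is shorter than the structure its tag claims to
 * carry; a min_len of 0 (the byte and uint32 arrays) enforces no lower
 * bound.
 */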
36 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
37 	[WMI_TLV_TAG_ARRAY_BYTE]
38 		= { .min_len = 0 },
39 	[WMI_TLV_TAG_ARRAY_UINT32]
40 		= { .min_len = 0 },
41 	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
42 		= { .min_len = sizeof(struct wmi_scan_event) },
43 	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
44 		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
45 	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
46 		= { .min_len = sizeof(struct wmi_chan_info_event) },
47 	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
48 		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
49 	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
50 		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
51 	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
52 		= { .min_len = sizeof(struct wmi_host_swba_event) },
53 	[WMI_TLV_TAG_STRUCT_TIM_INFO]
54 		= { .min_len = sizeof(struct wmi_tim_info) },
55 	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
56 		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
57 	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
58 		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
59 	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
60 		= { .min_len = sizeof(struct hal_reg_capabilities) },
61 	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
62 		= { .min_len = sizeof(struct wlan_host_mem_req) },
63 	[WMI_TLV_TAG_STRUCT_READY_EVENT]
64 		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
65 	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
66 		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
67 	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
68 		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
69 	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
70 		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
71 	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
72 		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
73 	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
74 		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
75 	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
76 		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
77 };
78 
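/* Walk a buffer of back-to-back TLV elements. Each element is a
 * struct wmi_tlv header (le16 tag, le16 len) followed by len bytes of
 * value; the header itself is not counted in len. Every element is
 * validated against the remaining buffer length and the per-tag minimum
 * length policy before the caller-supplied iter() callback is invoked
 * with a pointer to its value.
 */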
79 static int
80 ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
81 		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
82 				const void *ptr, void *data),
83 		    void *data)
84 {
85 	const void *begin = ptr;
86 	const struct wmi_tlv *tlv;
87 	u16 tlv_tag, tlv_len;
88 	int ret;
89 
90 	while (len > 0) {
91 		if (len < sizeof(*tlv)) {
92 			ath10k_dbg(ar, ATH10K_DBG_WMI,
93 				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
94 				   ptr - begin, len, sizeof(*tlv));
95 			return -EINVAL;
96 		}
97 
98 		tlv = ptr;
99 		tlv_tag = __le16_to_cpu(tlv->tag);
100 		tlv_len = __le16_to_cpu(tlv->len);
101 		ptr += sizeof(*tlv);
102 		len -= sizeof(*tlv);
103 
104 		if (tlv_len > len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
108 			return -EINVAL;
109 		}
110 
111 		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
112 		    wmi_tlv_policies[tlv_tag].min_len &&
113 		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   wmi_tlv_policies[tlv_tag].min_len);
118 			return -EINVAL;
119 		}
120 
121 		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
122 		if (ret)
123 			return ret;
124 
125 		ptr += tlv_len;
126 		len -= tlv_len;
127 	}
128 
129 	return 0;
130 }
131 
132 static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
133 				     const void *ptr, void *data)
134 {
135 	const void **tb = data;
136 
137 	if (tag < WMI_TLV_TAG_MAX)
138 		tb[tag] = ptr;
139 
140 	return 0;
141 }
142 
143 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
144 				const void *ptr, size_t len)
145 {
146 	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
147 				   (void *)tb);
148 }
149 
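/* Parse a TLV buffer into a heap-allocated, tag-indexed table of value
 * pointers. If a tag occurs more than once, the last occurrence wins.
 * The caller owns the returned table and must kfree() it.
 */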
150 static const void **
151 ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
152 			   size_t len, gfp_t gfp)
153 {
154 	const void **tb;
155 	int ret;
156 
	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
158 	if (!tb)
159 		return ERR_PTR(-ENOMEM);
160 
161 	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
162 	if (ret) {
163 		kfree(tb);
164 		return ERR_PTR(ret);
165 	}
166 
167 	return tb;
168 }
169 
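/* Return the value length recorded in the TLV header that immediately
 * precedes a value pointer taken from the lookup table above.
 */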
170 static u16 ath10k_wmi_tlv_len(const void *ptr)
171 {
172 	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
173 }
174 
175 /**************/
176 /* TLV events */
177 /**************/
178 static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
179 					      struct sk_buff *skb)
180 {
181 	const void **tb;
182 	const struct wmi_tlv_bcn_tx_status_ev *ev;
183 	struct ath10k_vif *arvif;
184 	u32 vdev_id, tx_status;
185 	int ret;
186 
187 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
188 	if (IS_ERR(tb)) {
189 		ret = PTR_ERR(tb);
190 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
191 		return ret;
192 	}
193 
194 	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
195 	if (!ev) {
196 		kfree(tb);
197 		return -EPROTO;
198 	}
199 
200 	tx_status = __le32_to_cpu(ev->tx_status);
201 	vdev_id = __le32_to_cpu(ev->vdev_id);
202 
203 	switch (tx_status) {
204 	case WMI_TLV_BCN_TX_STATUS_OK:
205 		break;
206 	case WMI_TLV_BCN_TX_STATUS_XRETRY:
207 	case WMI_TLV_BCN_TX_STATUS_DROP:
208 	case WMI_TLV_BCN_TX_STATUS_FILTERED:
209 		/* FIXME: It's probably worth telling mac80211 to stop the
210 		 * interface as it is crippled.
211 		 */
		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d\n",
			    vdev_id, tx_status);
214 		break;
215 	}
216 
217 	arvif = ath10k_get_arvif(ar, vdev_id);
218 	if (arvif && arvif->is_up && arvif->vif->csa_active)
219 		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
220 
221 	kfree(tb);
222 	return 0;
223 }
224 
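/* The diag data container event carries a byte array that is itself a
 * sequence of wmi_tlv_diag_item records, each a fixed header followed by
 * a payload padded to a 4-byte boundary. Every record is forwarded to
 * tracing individually.
 */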
225 static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
226 					  struct sk_buff *skb)
227 {
228 	const void **tb;
229 	const struct wmi_tlv_diag_data_ev *ev;
230 	const struct wmi_tlv_diag_item *item;
231 	const void *data;
232 	int ret, num_items, len;
233 
234 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
235 	if (IS_ERR(tb)) {
236 		ret = PTR_ERR(tb);
237 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
238 		return ret;
239 	}
240 
241 	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
242 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
243 	if (!ev || !data) {
244 		kfree(tb);
245 		return -EPROTO;
246 	}
247 
248 	num_items = __le32_to_cpu(ev->num_items);
249 	len = ath10k_wmi_tlv_len(data);
250 
251 	while (num_items--) {
252 		if (len == 0)
253 			break;
254 		if (len < sizeof(*item)) {
255 			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
256 			break;
257 		}
258 
259 		item = data;
260 
261 		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
262 			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
263 			break;
264 		}
265 
266 		trace_ath10k_wmi_diag_container(ar,
267 						item->type,
268 						__le32_to_cpu(item->timestamp),
269 						__le32_to_cpu(item->code),
270 						__le16_to_cpu(item->len),
271 						item->payload);
272 
273 		len -= sizeof(*item);
274 		len -= roundup(__le16_to_cpu(item->len), 4);
275 
276 		data += sizeof(*item);
277 		data += roundup(__le16_to_cpu(item->len), 4);
278 	}
279 
280 	if (num_items != -1 || len != 0)
281 		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
282 			    num_items, len);
283 
284 	kfree(tb);
285 	return 0;
286 }
287 
288 static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
289 				     struct sk_buff *skb)
290 {
291 	const void **tb;
292 	const void *data;
293 	int ret, len;
294 
295 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
296 	if (IS_ERR(tb)) {
297 		ret = PTR_ERR(tb);
298 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
299 		return ret;
300 	}
301 
302 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
303 	if (!data) {
304 		kfree(tb);
305 		return -EPROTO;
306 	}
307 	len = ath10k_wmi_tlv_len(data);
308 
309 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
310 	trace_ath10k_wmi_diag(ar, data, len);
311 
312 	kfree(tb);
313 	return 0;
314 }
315 
316 static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
317 					struct sk_buff *skb)
318 {
319 	const void **tb;
320 	const struct wmi_tlv_p2p_noa_ev *ev;
321 	const struct wmi_p2p_noa_info *noa;
322 	int ret, vdev_id;
323 
324 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
325 	if (IS_ERR(tb)) {
326 		ret = PTR_ERR(tb);
327 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
328 		return ret;
329 	}
330 
331 	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
332 	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
333 
334 	if (!ev || !noa) {
335 		kfree(tb);
336 		return -EPROTO;
337 	}
338 
339 	vdev_id = __le32_to_cpu(ev->vdev_id);
340 
341 	ath10k_dbg(ar, ATH10K_DBG_WMI,
342 		   "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
343 		   vdev_id, noa->num_descriptors);
344 
345 	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
346 	kfree(tb);
347 	return 0;
348 }
349 
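/* A tx pause event names a pause reason (pause_id) and a bitmap of
 * affected vdevs. Vdev-level pause reasons are handed to the mac layer
 * one vdev at a time; peer/tid-level reasons are not implemented and are
 * only logged.
 */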
350 static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
351 					 struct sk_buff *skb)
352 {
353 	const void **tb;
354 	const struct wmi_tlv_tx_pause_ev *ev;
355 	int ret, vdev_id;
356 	u32 pause_id, action, vdev_map, peer_id, tid_map;
357 
358 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
359 	if (IS_ERR(tb)) {
360 		ret = PTR_ERR(tb);
361 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
362 		return ret;
363 	}
364 
365 	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
366 	if (!ev) {
367 		kfree(tb);
368 		return -EPROTO;
369 	}
370 
371 	pause_id = __le32_to_cpu(ev->pause_id);
372 	action = __le32_to_cpu(ev->action);
373 	vdev_map = __le32_to_cpu(ev->vdev_map);
374 	peer_id = __le32_to_cpu(ev->peer_id);
375 	tid_map = __le32_to_cpu(ev->tid_map);
376 
377 	ath10k_dbg(ar, ATH10K_DBG_WMI,
378 		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
379 		   pause_id, action, vdev_map, peer_id, tid_map);
380 
381 	switch (pause_id) {
382 	case WMI_TLV_TX_PAUSE_ID_MCC:
383 	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
384 	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
385 	case WMI_TLV_TX_PAUSE_ID_AP_PS:
386 	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
387 		for (vdev_id = 0; vdev_map; vdev_id++) {
388 			if (!(vdev_map & BIT(vdev_id)))
389 				continue;
390 
391 			vdev_map &= ~BIT(vdev_id);
392 			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
393 							action);
394 		}
395 		break;
396 	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
397 	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
398 	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
399 	case WMI_TLV_TX_PAUSE_ID_HOST:
400 		ath10k_dbg(ar, ATH10K_DBG_MAC,
401 			   "mac ignoring unsupported tx pause id %d\n",
402 			   pause_id);
403 		break;
404 	default:
405 		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unknown tx pause id %d\n",
407 			   pause_id);
408 		break;
409 	}
410 
411 	kfree(tb);
412 	return 0;
413 }
414 
415 /***********/
416 /* TLV ops */
417 /***********/
418 
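/* Top-level WMI event dispatcher for the TLV variant. Most events are
 * handled by the generic WMI handlers; the skb is freed here unless a
 * handler takes ownership, in which case its case returns early instead
 * of breaking out of the switch (mgmt rx, service ready).
 */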
419 static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
420 {
421 	struct wmi_cmd_hdr *cmd_hdr;
422 	enum wmi_tlv_event_id id;
423 	bool consumed;
424 
425 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
426 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
427 
428 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
429 		goto out;
430 
431 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
432 
433 	consumed = ath10k_tm_event_wmi(ar, id, skb);
434 
	/* The ready event must be handled normally even in UTF mode so that
	 * we know the UTF firmware has booted; all other events are simply
	 * passed through to testmode.
	 */
439 	if (consumed && id != WMI_TLV_READY_EVENTID) {
440 		ath10k_dbg(ar, ATH10K_DBG_WMI,
441 			   "wmi tlv testmode consumed 0x%x\n", id);
442 		goto out;
443 	}
444 
445 	switch (id) {
446 	case WMI_TLV_MGMT_RX_EVENTID:
447 		ath10k_wmi_event_mgmt_rx(ar, skb);
448 		/* mgmt_rx() owns the skb now! */
449 		return;
450 	case WMI_TLV_SCAN_EVENTID:
451 		ath10k_wmi_event_scan(ar, skb);
452 		break;
453 	case WMI_TLV_CHAN_INFO_EVENTID:
454 		ath10k_wmi_event_chan_info(ar, skb);
455 		break;
456 	case WMI_TLV_ECHO_EVENTID:
457 		ath10k_wmi_event_echo(ar, skb);
458 		break;
459 	case WMI_TLV_DEBUG_MESG_EVENTID:
460 		ath10k_wmi_event_debug_mesg(ar, skb);
461 		break;
462 	case WMI_TLV_UPDATE_STATS_EVENTID:
463 		ath10k_wmi_event_update_stats(ar, skb);
464 		break;
465 	case WMI_TLV_VDEV_START_RESP_EVENTID:
466 		ath10k_wmi_event_vdev_start_resp(ar, skb);
467 		break;
468 	case WMI_TLV_VDEV_STOPPED_EVENTID:
469 		ath10k_wmi_event_vdev_stopped(ar, skb);
470 		break;
471 	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
472 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
473 		break;
474 	case WMI_TLV_HOST_SWBA_EVENTID:
475 		ath10k_wmi_event_host_swba(ar, skb);
476 		break;
477 	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
478 		ath10k_wmi_event_tbttoffset_update(ar, skb);
479 		break;
480 	case WMI_TLV_PHYERR_EVENTID:
481 		ath10k_wmi_event_phyerr(ar, skb);
482 		break;
483 	case WMI_TLV_ROAM_EVENTID:
484 		ath10k_wmi_event_roam(ar, skb);
485 		break;
486 	case WMI_TLV_PROFILE_MATCH:
487 		ath10k_wmi_event_profile_match(ar, skb);
488 		break;
489 	case WMI_TLV_DEBUG_PRINT_EVENTID:
490 		ath10k_wmi_event_debug_print(ar, skb);
491 		break;
492 	case WMI_TLV_PDEV_QVIT_EVENTID:
493 		ath10k_wmi_event_pdev_qvit(ar, skb);
494 		break;
495 	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
496 		ath10k_wmi_event_wlan_profile_data(ar, skb);
497 		break;
498 	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
499 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
500 		break;
501 	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
502 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
503 		break;
504 	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
505 		ath10k_wmi_event_rtt_error_report(ar, skb);
506 		break;
507 	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
508 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
509 		break;
510 	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
511 		ath10k_wmi_event_dcs_interference(ar, skb);
512 		break;
513 	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
514 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
515 		break;
516 	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
517 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
518 		break;
519 	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
520 		ath10k_wmi_event_gtk_offload_status(ar, skb);
521 		break;
522 	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
523 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
524 		break;
525 	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
526 		ath10k_wmi_event_delba_complete(ar, skb);
527 		break;
528 	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
529 		ath10k_wmi_event_addba_complete(ar, skb);
530 		break;
531 	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
532 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
533 		break;
534 	case WMI_TLV_SERVICE_READY_EVENTID:
535 		ath10k_wmi_event_service_ready(ar, skb);
536 		return;
537 	case WMI_TLV_READY_EVENTID:
538 		ath10k_wmi_event_ready(ar, skb);
539 		break;
540 	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
541 		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
542 		break;
543 	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
544 		ath10k_wmi_tlv_event_diag_data(ar, skb);
545 		break;
546 	case WMI_TLV_DIAG_EVENTID:
547 		ath10k_wmi_tlv_event_diag(ar, skb);
548 		break;
549 	case WMI_TLV_P2P_NOA_EVENTID:
550 		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
551 		break;
552 	case WMI_TLV_TX_PAUSE_EVENTID:
553 		ath10k_wmi_tlv_event_tx_pause(ar, skb);
554 		break;
555 	default:
556 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
557 		break;
558 	}
559 
560 out:
561 	dev_kfree_skb(skb);
562 }
563 
564 static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
565 					  struct sk_buff *skb,
566 					  struct wmi_scan_ev_arg *arg)
567 {
568 	const void **tb;
569 	const struct wmi_scan_event *ev;
570 	int ret;
571 
572 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
573 	if (IS_ERR(tb)) {
574 		ret = PTR_ERR(tb);
575 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
576 		return ret;
577 	}
578 
579 	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
580 	if (!ev) {
581 		kfree(tb);
582 		return -EPROTO;
583 	}
584 
585 	arg->event_type = ev->event_type;
586 	arg->reason = ev->reason;
587 	arg->channel_freq = ev->channel_freq;
588 	arg->scan_req_id = ev->scan_req_id;
589 	arg->scan_id = ev->scan_id;
590 	arg->vdev_id = ev->vdev_id;
591 
592 	kfree(tb);
593 	return 0;
594 }
595 
596 static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
597 					     struct sk_buff *skb,
598 					     struct wmi_mgmt_rx_ev_arg *arg)
599 {
600 	const void **tb;
601 	const struct wmi_tlv_mgmt_rx_ev *ev;
602 	const u8 *frame;
603 	u32 msdu_len;
604 	int ret;
605 
606 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
607 	if (IS_ERR(tb)) {
608 		ret = PTR_ERR(tb);
609 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
610 		return ret;
611 	}
612 
613 	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
614 	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
615 
616 	if (!ev || !frame) {
617 		kfree(tb);
618 		return -EPROTO;
619 	}
620 
621 	arg->channel = ev->channel;
622 	arg->buf_len = ev->buf_len;
623 	arg->status = ev->status;
624 	arg->snr = ev->snr;
625 	arg->phy_mode = ev->phy_mode;
626 	arg->rate = ev->rate;
627 
628 	msdu_len = __le32_to_cpu(arg->buf_len);
629 
630 	if (skb->len < (frame - skb->data) + msdu_len) {
631 		kfree(tb);
632 		return -EPROTO;
633 	}
634 
	/* Shift the sk_buff so that skb->data points at `frame`: trim the
	 * length to zero, grow the tail up to the frame offset, pull that
	 * offset off the head and finally grow the tail by the MSDU length.
	 */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);
640 
641 	kfree(tb);
642 	return 0;
643 }
644 
645 static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
646 					     struct sk_buff *skb,
647 					     struct wmi_ch_info_ev_arg *arg)
648 {
649 	const void **tb;
650 	const struct wmi_chan_info_event *ev;
651 	int ret;
652 
653 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
654 	if (IS_ERR(tb)) {
655 		ret = PTR_ERR(tb);
656 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
657 		return ret;
658 	}
659 
660 	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
661 	if (!ev) {
662 		kfree(tb);
663 		return -EPROTO;
664 	}
665 
666 	arg->err_code = ev->err_code;
667 	arg->freq = ev->freq;
668 	arg->cmd_flags = ev->cmd_flags;
669 	arg->noise_floor = ev->noise_floor;
670 	arg->rx_clear_count = ev->rx_clear_count;
671 	arg->cycle_count = ev->cycle_count;
672 
673 	kfree(tb);
674 	return 0;
675 }
676 
677 static int
678 ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
679 				     struct wmi_vdev_start_ev_arg *arg)
680 {
681 	const void **tb;
682 	const struct wmi_vdev_start_response_event *ev;
683 	int ret;
684 
685 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
686 	if (IS_ERR(tb)) {
687 		ret = PTR_ERR(tb);
688 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
689 		return ret;
690 	}
691 
692 	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
693 	if (!ev) {
694 		kfree(tb);
695 		return -EPROTO;
696 	}
697 
698 	skb_pull(skb, sizeof(*ev));
699 	arg->vdev_id = ev->vdev_id;
700 	arg->req_id = ev->req_id;
701 	arg->resp_type = ev->resp_type;
702 	arg->status = ev->status;
703 
704 	kfree(tb);
705 	return 0;
706 }
707 
708 static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
709 					       struct sk_buff *skb,
710 					       struct wmi_peer_kick_ev_arg *arg)
711 {
712 	const void **tb;
713 	const struct wmi_peer_sta_kickout_event *ev;
714 	int ret;
715 
716 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
717 	if (IS_ERR(tb)) {
718 		ret = PTR_ERR(tb);
719 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
720 		return ret;
721 	}
722 
723 	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
724 	if (!ev) {
725 		kfree(tb);
726 		return -EPROTO;
727 	}
728 
729 	arg->mac_addr = ev->peer_macaddr.addr;
730 
731 	kfree(tb);
732 	return 0;
733 }
734 
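/* A host SWBA event is a fixed event structure followed by two
 * WMI_TLV_TAG_ARRAY_STRUCT containers: the first holds one wmi_tim_info
 * per beaconing vdev, the second one wmi_p2p_noa_info per vdev. The
 * containers are walked with nested iterators; tim_done/noa_done record
 * which of the two is currently being visited.
 */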
735 struct wmi_tlv_swba_parse {
736 	const struct wmi_host_swba_event *ev;
737 	bool tim_done;
738 	bool noa_done;
739 	size_t n_tim;
740 	size_t n_noa;
741 	struct wmi_swba_ev_arg *arg;
742 };
743 
744 static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
745 					 const void *ptr, void *data)
746 {
747 	struct wmi_tlv_swba_parse *swba = data;
748 	struct wmi_tim_info_arg *tim_info_arg;
749 	const struct wmi_tim_info *tim_info_ev = ptr;
750 
751 	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
752 		return -EPROTO;
753 
754 	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
755 		return -ENOBUFS;
756 
757 	if (__le32_to_cpu(tim_info_ev->tim_len) >
758 	     sizeof(tim_info_ev->tim_bitmap)) {
759 		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
760 		return -EPROTO;
761 	}
762 
763 	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
764 	tim_info_arg->tim_len = tim_info_ev->tim_len;
765 	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
766 	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
767 	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
768 	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
769 
770 	swba->n_tim++;
771 
772 	return 0;
773 }
774 
775 static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
776 					 const void *ptr, void *data)
777 {
778 	struct wmi_tlv_swba_parse *swba = data;
779 
780 	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
781 		return -EPROTO;
782 
783 	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
784 		return -ENOBUFS;
785 
786 	swba->arg->noa_info[swba->n_noa++] = ptr;
787 	return 0;
788 }
789 
790 static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
791 				     const void *ptr, void *data)
792 {
793 	struct wmi_tlv_swba_parse *swba = data;
794 	int ret;
795 
796 	switch (tag) {
797 	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
798 		swba->ev = ptr;
799 		break;
800 	case WMI_TLV_TAG_ARRAY_STRUCT:
801 		if (!swba->tim_done) {
802 			swba->tim_done = true;
803 			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
804 						  ath10k_wmi_tlv_swba_tim_parse,
805 						  swba);
806 			if (ret)
807 				return ret;
808 		} else if (!swba->noa_done) {
809 			swba->noa_done = true;
810 			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
811 						  ath10k_wmi_tlv_swba_noa_parse,
812 						  swba);
813 			if (ret)
814 				return ret;
815 		}
816 		break;
817 	default:
818 		break;
819 	}
820 	return 0;
821 }
822 
823 static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
824 					  struct sk_buff *skb,
825 					  struct wmi_swba_ev_arg *arg)
826 {
827 	struct wmi_tlv_swba_parse swba = { .arg = arg };
828 	u32 map;
829 	size_t n_vdevs;
830 	int ret;
831 
832 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
833 				  ath10k_wmi_tlv_swba_parse, &swba);
834 	if (ret) {
835 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
836 		return ret;
837 	}
838 
839 	if (!swba.ev)
840 		return -EPROTO;
841 
842 	arg->vdev_map = swba.ev->vdev_map;
843 
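	/* Count the vdevs flagged in the beacon map; the firmware is
	 * expected to have supplied exactly one TIM entry and one NoA
	 * entry per flagged vdev.
	 */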
844 	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
845 		if (map & BIT(0))
846 			n_vdevs++;
847 
848 	if (n_vdevs != swba.n_tim ||
849 	    n_vdevs != swba.n_noa)
850 		return -EPROTO;
851 
852 	return 0;
853 }
854 
855 static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
856 						struct sk_buff *skb,
857 						struct wmi_phyerr_hdr_arg *arg)
858 {
859 	const void **tb;
860 	const struct wmi_tlv_phyerr_ev *ev;
861 	const void *phyerrs;
862 	int ret;
863 
864 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
865 	if (IS_ERR(tb)) {
866 		ret = PTR_ERR(tb);
867 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
868 		return ret;
869 	}
870 
871 	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
872 	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
873 
874 	if (!ev || !phyerrs) {
875 		kfree(tb);
876 		return -EPROTO;
877 	}
878 
879 	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
880 	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
881 	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
882 	arg->buf_len = __le32_to_cpu(ev->buf_len);
883 	arg->phyerrs = phyerrs;
884 
885 	kfree(tb);
886 	return 0;
887 }
888 
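/* Expected WMI TLV ABI versions, verified against the service ready
 * event before the rest of the event is trusted. VER0 packs the major
 * version into the top 8 bits and the minor version into the lower 24
 * bits.
 */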
889 #define WMI_TLV_ABI_VER_NS0 0x5F414351
890 #define WMI_TLV_ABI_VER_NS1 0x00004C4D
891 #define WMI_TLV_ABI_VER_NS2 0x00000000
892 #define WMI_TLV_ABI_VER_NS3 0x00000000
893 
894 #define WMI_TLV_ABI_VER0_MAJOR 1
895 #define WMI_TLV_ABI_VER0_MINOR 0
896 #define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
897 			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
898 #define WMI_TLV_ABI_VER1 53
899 
900 static int
901 ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
902 			      const void *ptr, void *data)
903 {
904 	struct wmi_svc_rdy_ev_arg *arg = data;
905 	int i;
906 
907 	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
908 		return -EPROTO;
909 
910 	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
911 		if (!arg->mem_reqs[i]) {
912 			arg->mem_reqs[i] = ptr;
913 			return 0;
914 		}
915 	}
916 
917 	return -ENOMEM;
918 }
919 
920 struct wmi_tlv_svc_rdy_parse {
921 	const struct hal_reg_capabilities *reg;
922 	const struct wmi_tlv_svc_rdy_ev *ev;
923 	const __le32 *svc_bmap;
924 	const struct wlan_host_mem_req *mem_reqs;
925 	bool svc_bmap_done;
926 	bool dbs_hw_mode_done;
927 };
928 
929 static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
930 					const void *ptr, void *data)
931 {
932 	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
933 
934 	switch (tag) {
935 	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
936 		svc_rdy->ev = ptr;
937 		break;
938 	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
939 		svc_rdy->reg = ptr;
940 		break;
941 	case WMI_TLV_TAG_ARRAY_STRUCT:
942 		svc_rdy->mem_reqs = ptr;
943 		break;
944 	case WMI_TLV_TAG_ARRAY_UINT32:
945 		if (!svc_rdy->svc_bmap_done) {
946 			svc_rdy->svc_bmap_done = true;
947 			svc_rdy->svc_bmap = ptr;
948 		} else if (!svc_rdy->dbs_hw_mode_done) {
949 			svc_rdy->dbs_hw_mode_done = true;
950 		}
951 		break;
952 	default:
953 		break;
954 	}
955 	return 0;
956 }
957 
958 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
959 					     struct sk_buff *skb,
960 					     struct wmi_svc_rdy_ev_arg *arg)
961 {
962 	const struct hal_reg_capabilities *reg;
963 	const struct wmi_tlv_svc_rdy_ev *ev;
964 	const __le32 *svc_bmap;
965 	const struct wlan_host_mem_req *mem_reqs;
966 	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
967 	int ret;
968 
969 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
970 				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
971 	if (ret) {
972 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
973 		return ret;
974 	}
975 
976 	ev = svc_rdy.ev;
977 	reg = svc_rdy.reg;
978 	svc_bmap = svc_rdy.svc_bmap;
979 	mem_reqs = svc_rdy.mem_reqs;
980 
981 	if (!ev || !reg || !svc_bmap || !mem_reqs)
982 		return -EPROTO;
983 
984 	/* This is an internal ABI compatibility check for WMI TLV so check it
985 	 * here instead of the generic WMI code.
986 	 */
987 	ath10k_dbg(ar, ATH10K_DBG_WMI,
988 		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
989 		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
990 		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
991 		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
992 		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
993 		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
994 
995 	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
996 	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
997 	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
998 	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
999 	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
1000 		return -ENOTSUPP;
1001 	}
1002 
1003 	arg->min_tx_power = ev->hw_min_tx_power;
1004 	arg->max_tx_power = ev->hw_max_tx_power;
1005 	arg->ht_cap = ev->ht_cap_info;
1006 	arg->vht_cap = ev->vht_cap_info;
1007 	arg->sw_ver0 = ev->abi.abi_ver0;
1008 	arg->sw_ver1 = ev->abi.abi_ver1;
1009 	arg->fw_build = ev->fw_build_vers;
1010 	arg->phy_capab = ev->phy_capability;
1011 	arg->num_rf_chains = ev->num_rf_chains;
1012 	arg->eeprom_rd = reg->eeprom_rd;
1013 	arg->num_mem_reqs = ev->num_mem_reqs;
1014 	arg->service_map = svc_bmap;
1015 	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
1016 
1017 	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
1018 				  ath10k_wmi_tlv_parse_mem_reqs, arg);
1019 	if (ret) {
1020 		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
1021 		return ret;
1022 	}
1023 
1024 	return 0;
1025 }
1026 
1027 static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
1028 					 struct sk_buff *skb,
1029 					 struct wmi_rdy_ev_arg *arg)
1030 {
1031 	const void **tb;
1032 	const struct wmi_tlv_rdy_ev *ev;
1033 	int ret;
1034 
1035 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1036 	if (IS_ERR(tb)) {
1037 		ret = PTR_ERR(tb);
1038 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1039 		return ret;
1040 	}
1041 
1042 	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
1043 	if (!ev) {
1044 		kfree(tb);
1045 		return -EPROTO;
1046 	}
1047 
1048 	arg->sw_version = ev->abi.abi_ver0;
1049 	arg->abi_version = ev->abi.abi_ver1;
1050 	arg->status = ev->status;
1051 	arg->mac_addr = ev->mac_addr.addr;
1052 
1053 	kfree(tb);
1054 	return 0;
1055 }
1056 
1057 static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
1058 					   struct ath10k_fw_stats_vdev *dst)
1059 {
1060 	int i;
1061 
1062 	dst->vdev_id = __le32_to_cpu(src->vdev_id);
1063 	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
1064 	dst->data_snr = __le32_to_cpu(src->data_snr);
1065 	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
1066 	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
1067 	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
1068 	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
1069 	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
1070 	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
1071 
1072 	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
1073 		dst->num_tx_frames[i] =
1074 			__le32_to_cpu(src->num_tx_frames[i]);
1075 
1076 	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
1077 		dst->num_tx_frames_retries[i] =
1078 			__le32_to_cpu(src->num_tx_frames_retries[i]);
1079 
1080 	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
1081 		dst->num_tx_frames_failures[i] =
1082 			__le32_to_cpu(src->num_tx_frames_failures[i]);
1083 
1084 	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
1085 		dst->tx_rate_history[i] =
1086 			__le32_to_cpu(src->tx_rate_history[i]);
1087 
1088 	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
1089 		dst->beacon_rssi_history[i] =
1090 			__le32_to_cpu(src->beacon_rssi_history[i]);
1091 }
1092 
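/* The stats event is a fixed header holding per-category counts followed
 * by a single byte array in which pdev, vdev and peer stats records are
 * packed back to back in that order. Each record is bounds-checked
 * against the remaining array length before being copied out.
 */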
1093 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
1094 					   struct sk_buff *skb,
1095 					   struct ath10k_fw_stats *stats)
1096 {
1097 	const void **tb;
1098 	const struct wmi_tlv_stats_ev *ev;
1099 	const void *data;
1100 	u32 num_pdev_stats;
1101 	u32 num_vdev_stats;
1102 	u32 num_peer_stats;
1103 	u32 num_bcnflt_stats;
1104 	u32 num_chan_stats;
1105 	size_t data_len;
1106 	int ret;
1107 	int i;
1108 
1109 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1110 	if (IS_ERR(tb)) {
1111 		ret = PTR_ERR(tb);
1112 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1113 		return ret;
1114 	}
1115 
1116 	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
1117 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
1118 
1119 	if (!ev || !data) {
1120 		kfree(tb);
1121 		return -EPROTO;
1122 	}
1123 
1124 	data_len = ath10k_wmi_tlv_len(data);
1125 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
1126 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
1127 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
1128 	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
1129 	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
1130 
1131 	ath10k_dbg(ar, ATH10K_DBG_WMI,
1132 		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
1133 		   num_pdev_stats, num_vdev_stats, num_peer_stats,
1134 		   num_bcnflt_stats, num_chan_stats);
1135 
1136 	for (i = 0; i < num_pdev_stats; i++) {
1137 		const struct wmi_pdev_stats *src;
1138 		struct ath10k_fw_stats_pdev *dst;
1139 
1140 		src = data;
1141 		if (data_len < sizeof(*src)) {
1142 			kfree(tb);
1143 			return -EPROTO;
1144 		}
1145 
1146 		data += sizeof(*src);
1147 		data_len -= sizeof(*src);
1148 
1149 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1150 		if (!dst)
1151 			continue;
1152 
1153 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
1154 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
1155 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
1156 		list_add_tail(&dst->list, &stats->pdevs);
1157 	}
1158 
1159 	for (i = 0; i < num_vdev_stats; i++) {
1160 		const struct wmi_tlv_vdev_stats *src;
1161 		struct ath10k_fw_stats_vdev *dst;
1162 
1163 		src = data;
1164 		if (data_len < sizeof(*src)) {
1165 			kfree(tb);
1166 			return -EPROTO;
1167 		}
1168 
1169 		data += sizeof(*src);
1170 		data_len -= sizeof(*src);
1171 
1172 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1173 		if (!dst)
1174 			continue;
1175 
1176 		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
1177 		list_add_tail(&dst->list, &stats->vdevs);
1178 	}
1179 
1180 	for (i = 0; i < num_peer_stats; i++) {
1181 		const struct wmi_10x_peer_stats *src;
1182 		struct ath10k_fw_stats_peer *dst;
1183 
1184 		src = data;
1185 		if (data_len < sizeof(*src)) {
1186 			kfree(tb);
1187 			return -EPROTO;
1188 		}
1189 
1190 		data += sizeof(*src);
1191 		data_len -= sizeof(*src);
1192 
1193 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1194 		if (!dst)
1195 			continue;
1196 
1197 		ath10k_wmi_pull_peer_stats(&src->old, dst);
1198 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1199 		list_add_tail(&dst->list, &stats->peers);
1200 	}
1201 
1202 	kfree(tb);
1203 	return 0;
1204 }
1205 
1206 static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
1207 					  struct sk_buff *skb,
1208 					  struct wmi_roam_ev_arg *arg)
1209 {
1210 	const void **tb;
1211 	const struct wmi_tlv_roam_ev *ev;
1212 	int ret;
1213 
1214 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1215 	if (IS_ERR(tb)) {
1216 		ret = PTR_ERR(tb);
1217 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1218 		return ret;
1219 	}
1220 
1221 	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
1222 	if (!ev) {
1223 		kfree(tb);
1224 		return -EPROTO;
1225 	}
1226 
1227 	arg->vdev_id = ev->vdev_id;
1228 	arg->reason = ev->reason;
1229 	arg->rssi = ev->rssi;
1230 
1231 	kfree(tb);
1232 	return 0;
1233 }
1234 
1235 static int
1236 ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
1237 			      struct wmi_wow_ev_arg *arg)
1238 {
1239 	const void **tb;
1240 	const struct wmi_tlv_wow_event_info *ev;
1241 	int ret;
1242 
1243 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1244 	if (IS_ERR(tb)) {
1245 		ret = PTR_ERR(tb);
1246 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1247 		return ret;
1248 	}
1249 
1250 	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
1251 	if (!ev) {
1252 		kfree(tb);
1253 		return -EPROTO;
1254 	}
1255 
1256 	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
1257 	arg->flag = __le32_to_cpu(ev->flag);
1258 	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
1259 	arg->data_len = __le32_to_cpu(ev->data_len);
1260 
1261 	kfree(tb);
1262 	return 0;
1263 }
1264 
1265 static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
1266 					  struct sk_buff *skb,
1267 					  struct wmi_echo_ev_arg *arg)
1268 {
1269 	const void **tb;
1270 	const struct wmi_echo_event *ev;
1271 	int ret;
1272 
1273 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
1274 	if (IS_ERR(tb)) {
1275 		ret = PTR_ERR(tb);
1276 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
1277 		return ret;
1278 	}
1279 
1280 	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
1281 	if (!ev) {
1282 		kfree(tb);
1283 		return -EPROTO;
1284 	}
1285 
1286 	arg->value = ev->value;
1287 
1288 	kfree(tb);
1289 	return 0;
1290 }
1291 
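/* The command builders below share a common pattern: allocate a WMI skb
 * large enough for one or more TLV headers plus their payloads, fill in
 * each header's tag and length, and write the payload immediately after
 * it through tlv->value.
 */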
1292 static struct sk_buff *
1293 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1294 {
1295 	struct wmi_tlv_pdev_suspend *cmd;
1296 	struct wmi_tlv *tlv;
1297 	struct sk_buff *skb;
1298 
1299 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1300 	if (!skb)
1301 		return ERR_PTR(-ENOMEM);
1302 
1303 	tlv = (void *)skb->data;
1304 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1305 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1306 	cmd = (void *)tlv->value;
1307 	cmd->opt = __cpu_to_le32(opt);
1308 
1309 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1310 	return skb;
1311 }
1312 
1313 static struct sk_buff *
1314 ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1315 {
1316 	struct wmi_tlv_resume_cmd *cmd;
1317 	struct wmi_tlv *tlv;
1318 	struct sk_buff *skb;
1319 
1320 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1321 	if (!skb)
1322 		return ERR_PTR(-ENOMEM);
1323 
1324 	tlv = (void *)skb->data;
1325 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1326 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1327 	cmd = (void *)tlv->value;
1328 	cmd->reserved = __cpu_to_le32(0);
1329 
1330 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1331 	return skb;
1332 }
1333 
1334 static struct sk_buff *
1335 ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
1336 				  u16 rd, u16 rd2g, u16 rd5g,
1337 				  u16 ctl2g, u16 ctl5g,
1338 				  enum wmi_dfs_region dfs_reg)
1339 {
1340 	struct wmi_tlv_pdev_set_rd_cmd *cmd;
1341 	struct wmi_tlv *tlv;
1342 	struct sk_buff *skb;
1343 
1344 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1345 	if (!skb)
1346 		return ERR_PTR(-ENOMEM);
1347 
1348 	tlv = (void *)skb->data;
1349 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
1350 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1351 	cmd = (void *)tlv->value;
1352 	cmd->regd = __cpu_to_le32(rd);
1353 	cmd->regd_2ghz = __cpu_to_le32(rd2g);
1354 	cmd->regd_5ghz = __cpu_to_le32(rd5g);
1355 	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
1356 	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
1357 
1358 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
1359 	return skb;
1360 }
1361 
1362 static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
1363 {
1364 	return WMI_TXBF_CONF_AFTER_ASSOC;
1365 }
1366 
1367 static struct sk_buff *
1368 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1369 				     u32 param_value)
1370 {
1371 	struct wmi_tlv_pdev_set_param_cmd *cmd;
1372 	struct wmi_tlv *tlv;
1373 	struct sk_buff *skb;
1374 
1375 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1376 	if (!skb)
1377 		return ERR_PTR(-ENOMEM);
1378 
1379 	tlv = (void *)skb->data;
1380 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1381 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1382 	cmd = (void *)tlv->value;
1383 	cmd->param_id = __cpu_to_le32(param_id);
1384 	cmd->param_value = __cpu_to_le32(param_value);
1385 
1386 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
1387 	return skb;
1388 }
1389 
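/* Build the INIT command as three consecutive TLVs: the fixed init
 * parameters, the resource configuration and an array of host memory
 * chunks whose size depends on how many chunks the firmware asked for
 * during the service ready exchange.
 */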
1390 static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
1391 {
1392 	struct sk_buff *skb;
1393 	struct wmi_tlv *tlv;
1394 	struct wmi_tlv_init_cmd *cmd;
1395 	struct wmi_tlv_resource_config *cfg;
1396 	struct wmi_host_mem_chunks *chunks;
1397 	size_t len, chunks_len;
1398 	void *ptr;
1399 
1400 	chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
1401 	len = (sizeof(*tlv) + sizeof(*cmd)) +
1402 	      (sizeof(*tlv) + sizeof(*cfg)) +
1403 	      (sizeof(*tlv) + chunks_len);
1404 
1405 	skb = ath10k_wmi_alloc_skb(ar, len);
1406 	if (!skb)
1407 		return ERR_PTR(-ENOMEM);
1408 
1409 	ptr = skb->data;
1410 
1411 	tlv = ptr;
1412 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
1413 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1414 	cmd = (void *)tlv->value;
1415 	ptr += sizeof(*tlv);
1416 	ptr += sizeof(*cmd);
1417 
1418 	tlv = ptr;
1419 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
1420 	tlv->len = __cpu_to_le16(sizeof(*cfg));
1421 	cfg = (void *)tlv->value;
1422 	ptr += sizeof(*tlv);
1423 	ptr += sizeof(*cfg);
1424 
1425 	tlv = ptr;
1426 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1427 	tlv->len = __cpu_to_le16(chunks_len);
1428 	chunks = (void *)tlv->value;
1429 
1430 	ptr += sizeof(*tlv);
1431 	ptr += chunks_len;
1432 
1433 	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
1434 	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
1435 	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
1436 	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
1437 	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
1438 	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
1439 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
1440 
1441 	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1442 
1443 	cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
1444 	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
1445 	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
1446 
1447 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
1448 		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1449 		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1450 	} else {
1451 		cfg->num_offload_peers = __cpu_to_le32(0);
1452 		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
1453 	}
1454 
1455 	cfg->num_peer_keys = __cpu_to_le32(2);
1456 	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
1457 	cfg->tx_chain_mask = __cpu_to_le32(0x7);
1458 	cfg->rx_chain_mask = __cpu_to_le32(0x7);
1459 	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
1460 	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
1461 	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
1462 	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
1463 	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
1464 	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
1465 	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1466 	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1467 	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
1468 	cfg->num_mcast_groups = __cpu_to_le32(0);
1469 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
1470 	cfg->mcast2ucast_mode = __cpu_to_le32(0);
1471 	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
1472 	cfg->dma_burst_size = __cpu_to_le32(0);
1473 	cfg->mac_aggr_delim = __cpu_to_le32(0);
1474 	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
1475 	cfg->vow_config = __cpu_to_le32(0);
1476 	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
1477 	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
1478 	cfg->max_frag_entries = __cpu_to_le32(2);
1479 	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
1480 	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
1481 	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
1482 	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
1483 	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
1484 	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
1485 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
1486 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
1487 	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
1488 
1489 	ath10k_wmi_put_host_mem_chunks(ar, chunks);
1490 
1491 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
1492 	return skb;
1493 }
1494 
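/* Build the START_SCAN command: a fixed parameter TLV followed by four
 * variable-length arrays (channel list, SSIDs, BSSIDs and probe IEs),
 * each emitted as its own TLV even when empty.
 */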
1495 static struct sk_buff *
1496 ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1497 				 const struct wmi_start_scan_arg *arg)
1498 {
1499 	struct wmi_tlv_start_scan_cmd *cmd;
1500 	struct wmi_tlv *tlv;
1501 	struct sk_buff *skb;
1502 	size_t len, chan_len, ssid_len, bssid_len, ie_len;
1503 	__le32 *chans;
1504 	struct wmi_ssid *ssids;
1505 	struct wmi_mac_addr *addrs;
1506 	void *ptr;
1507 	int i, ret;
1508 
1509 	ret = ath10k_wmi_start_scan_verify(arg);
1510 	if (ret)
1511 		return ERR_PTR(ret);
1512 
1513 	chan_len = arg->n_channels * sizeof(__le32);
1514 	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1515 	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1516 	ie_len = roundup(arg->ie_len, 4);
	/* All four array TLVs below are emitted even when empty, so their
	 * headers must always be accounted for in the allocation.
	 */
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chan_len) +
	      (sizeof(*tlv) + ssid_len) +
	      (sizeof(*tlv) + bssid_len) +
	      (sizeof(*tlv) + ie_len);
1522 
1523 	skb = ath10k_wmi_alloc_skb(ar, len);
1524 	if (!skb)
1525 		return ERR_PTR(-ENOMEM);
1526 
1527 	ptr = (void *)skb->data;
1528 	tlv = ptr;
1529 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
1530 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1531 	cmd = (void *)tlv->value;
1532 
1533 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
1534 	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
1535 	cmd->num_channels = __cpu_to_le32(arg->n_channels);
1536 	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
1537 	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
1538 	cmd->ie_len = __cpu_to_le32(arg->ie_len);
1539 	cmd->num_probes = __cpu_to_le32(3);
1540 
1541 	/* FIXME: There are some scan flag inconsistencies across firmwares,
1542 	 * e.g. WMI-TLV inverts the logic behind the following flag.
1543 	 */
1544 	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
1545 
1546 	ptr += sizeof(*tlv);
1547 	ptr += sizeof(*cmd);
1548 
1549 	tlv = ptr;
1550 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
1551 	tlv->len = __cpu_to_le16(chan_len);
1552 	chans = (void *)tlv->value;
1553 	for (i = 0; i < arg->n_channels; i++)
1554 		chans[i] = __cpu_to_le32(arg->channels[i]);
1555 
1556 	ptr += sizeof(*tlv);
1557 	ptr += chan_len;
1558 
1559 	tlv = ptr;
1560 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1561 	tlv->len = __cpu_to_le16(ssid_len);
1562 	ssids = (void *)tlv->value;
1563 	for (i = 0; i < arg->n_ssids; i++) {
1564 		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
1565 		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
1566 	}
1567 
1568 	ptr += sizeof(*tlv);
1569 	ptr += ssid_len;
1570 
1571 	tlv = ptr;
1572 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1573 	tlv->len = __cpu_to_le16(bssid_len);
1574 	addrs = (void *)tlv->value;
1575 	for (i = 0; i < arg->n_bssids; i++)
1576 		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
1577 
1578 	ptr += sizeof(*tlv);
1579 	ptr += bssid_len;
1580 
1581 	tlv = ptr;
1582 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1583 	tlv->len = __cpu_to_le16(ie_len);
1584 	memcpy(tlv->value, arg->ie, arg->ie_len);
1585 
1586 	ptr += sizeof(*tlv);
1587 	ptr += ie_len;
1588 
1589 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
1590 	return skb;
1591 }
1592 
1593 static struct sk_buff *
1594 ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
1595 				const struct wmi_stop_scan_arg *arg)
1596 {
1597 	struct wmi_stop_scan_cmd *cmd;
1598 	struct wmi_tlv *tlv;
1599 	struct sk_buff *skb;
1600 	u32 scan_id;
1601 	u32 req_id;
1602 
1603 	if (arg->req_id > 0xFFF)
1604 		return ERR_PTR(-EINVAL);
1605 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
1606 		return ERR_PTR(-EINVAL);
1607 
1608 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1609 	if (!skb)
1610 		return ERR_PTR(-ENOMEM);
1611 
1612 	scan_id = arg->u.scan_id;
1613 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
1614 
1615 	req_id = arg->req_id;
1616 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1617 
1618 	tlv = (void *)skb->data;
1619 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
1620 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1621 	cmd = (void *)tlv->value;
1622 	cmd->req_type = __cpu_to_le32(arg->req_type);
1623 	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
1624 	cmd->scan_id = __cpu_to_le32(scan_id);
1625 	cmd->scan_req_id = __cpu_to_le32(req_id);
1626 
1627 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
1628 	return skb;
1629 }
1630 
1631 static struct sk_buff *
1632 ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
1633 				  u32 vdev_id,
1634 				  enum wmi_vdev_type vdev_type,
1635 				  enum wmi_vdev_subtype vdev_subtype,
1636 				  const u8 mac_addr[ETH_ALEN])
1637 {
1638 	struct wmi_vdev_create_cmd *cmd;
1639 	struct wmi_tlv *tlv;
1640 	struct sk_buff *skb;
1641 
1642 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1643 	if (!skb)
1644 		return ERR_PTR(-ENOMEM);
1645 
1646 	tlv = (void *)skb->data;
1647 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
1648 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1649 	cmd = (void *)tlv->value;
1650 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1651 	cmd->vdev_type = __cpu_to_le32(vdev_type);
1652 	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
1653 	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
1654 
1655 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
1656 	return skb;
1657 }
1658 
1659 static struct sk_buff *
1660 ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
1661 {
1662 	struct wmi_vdev_delete_cmd *cmd;
1663 	struct wmi_tlv *tlv;
1664 	struct sk_buff *skb;
1665 
1666 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1667 	if (!skb)
1668 		return ERR_PTR(-ENOMEM);
1669 
1670 	tlv = (void *)skb->data;
1671 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
1672 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1673 	cmd = (void *)tlv->value;
1674 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1675 
1676 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
1677 	return skb;
1678 }
1679 
1680 static struct sk_buff *
1681 ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
1682 				 const struct wmi_vdev_start_request_arg *arg,
1683 				 bool restart)
1684 {
1685 	struct wmi_tlv_vdev_start_cmd *cmd;
1686 	struct wmi_channel *ch;
1687 	struct wmi_p2p_noa_descriptor *noa;
1688 	struct wmi_tlv *tlv;
1689 	struct sk_buff *skb;
1690 	size_t len;
1691 	void *ptr;
1692 	u32 flags = 0;
1693 
1694 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
1695 		return ERR_PTR(-EINVAL);
1696 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1697 		return ERR_PTR(-EINVAL);
1698 
1699 	len = (sizeof(*tlv) + sizeof(*cmd)) +
1700 	      (sizeof(*tlv) + sizeof(*ch)) +
1701 	      (sizeof(*tlv) + 0);
1702 	skb = ath10k_wmi_alloc_skb(ar, len);
1703 	if (!skb)
1704 		return ERR_PTR(-ENOMEM);
1705 
1706 	if (arg->hidden_ssid)
1707 		flags |= WMI_VDEV_START_HIDDEN_SSID;
1708 	if (arg->pmf_enabled)
1709 		flags |= WMI_VDEV_START_PMF_ENABLED;
1710 
1711 	ptr = (void *)skb->data;
1712 
1713 	tlv = ptr;
1714 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
1715 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1716 	cmd = (void *)tlv->value;
1717 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1718 	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
1719 	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
1720 	cmd->flags = __cpu_to_le32(flags);
1721 	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
1722 	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
1723 	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
1724 
1725 	if (arg->ssid) {
1726 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
1727 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1728 	}
1729 
1730 	ptr += sizeof(*tlv);
1731 	ptr += sizeof(*cmd);
1732 
1733 	tlv = ptr;
1734 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
1735 	tlv->len = __cpu_to_le16(sizeof(*ch));
1736 	ch = (void *)tlv->value;
1737 	ath10k_wmi_put_wmi_channel(ch, &arg->channel);
1738 
1739 	ptr += sizeof(*tlv);
1740 	ptr += sizeof(*ch);
1741 
1742 	tlv = ptr;
1743 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1744 	tlv->len = 0;
1745 	noa = (void *)tlv->value;
1746 
1747 	/* Note: This is a nested TLV containing:
1748 	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
1749 	 */
1750 
1751 	ptr += sizeof(*tlv);
1752 	ptr += 0;
1753 
1754 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
1755 	return skb;
1756 }
1757 
1758 static struct sk_buff *
1759 ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
1760 {
1761 	struct wmi_vdev_stop_cmd *cmd;
1762 	struct wmi_tlv *tlv;
1763 	struct sk_buff *skb;
1764 
1765 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1766 	if (!skb)
1767 		return ERR_PTR(-ENOMEM);
1768 
1769 	tlv = (void *)skb->data;
1770 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
1771 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1772 	cmd = (void *)tlv->value;
1773 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1774 
1775 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
1776 	return skb;
1777 }
1778 
1779 static struct sk_buff *
1780 ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
1781 			      const u8 *bssid)
1782 
1783 {
1784 	struct wmi_vdev_up_cmd *cmd;
1785 	struct wmi_tlv *tlv;
1786 	struct sk_buff *skb;
1787 
1788 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1789 	if (!skb)
1790 		return ERR_PTR(-ENOMEM);
1791 
1792 	tlv = (void *)skb->data;
1793 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
1794 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1795 	cmd = (void *)tlv->value;
1796 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1797 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
1798 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
1799 
1800 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
1801 	return skb;
1802 }
1803 
1804 static struct sk_buff *
1805 ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
1806 {
1807 	struct wmi_vdev_down_cmd *cmd;
1808 	struct wmi_tlv *tlv;
1809 	struct sk_buff *skb;
1810 
1811 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1812 	if (!skb)
1813 		return ERR_PTR(-ENOMEM);
1814 
1815 	tlv = (void *)skb->data;
1816 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
1817 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1818 	cmd = (void *)tlv->value;
1819 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1820 
1821 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
1822 	return skb;
1823 }
1824 
1825 static struct sk_buff *
1826 ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1827 				     u32 param_id, u32 param_value)
1828 {
1829 	struct wmi_vdev_set_param_cmd *cmd;
1830 	struct wmi_tlv *tlv;
1831 	struct sk_buff *skb;
1832 
1833 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1834 	if (!skb)
1835 		return ERR_PTR(-ENOMEM);
1836 
1837 	tlv = (void *)skb->data;
1838 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
1839 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1840 	cmd = (void *)tlv->value;
1841 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1842 	cmd->param_id = __cpu_to_le32(param_id);
1843 	cmd->param_value = __cpu_to_le32(param_value);
1844 
1845 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
1846 	return skb;
1847 }
1848 
1849 static struct sk_buff *
1850 ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
1851 				       const struct wmi_vdev_install_key_arg *arg)
1852 {
1853 	struct wmi_vdev_install_key_cmd *cmd;
1854 	struct wmi_tlv *tlv;
1855 	struct sk_buff *skb;
1856 	size_t len;
1857 	void *ptr;
1858 
1859 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
1860 		return ERR_PTR(-EINVAL);
1861 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
1862 		return ERR_PTR(-EINVAL);
1863 
1864 	len = sizeof(*tlv) + sizeof(*cmd) +
1865 	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
1866 	skb = ath10k_wmi_alloc_skb(ar, len);
1867 	if (!skb)
1868 		return ERR_PTR(-ENOMEM);
1869 
1870 	ptr = (void *)skb->data;
1871 	tlv = ptr;
1872 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
1873 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1874 	cmd = (void *)tlv->value;
1875 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1876 	cmd->key_idx = __cpu_to_le32(arg->key_idx);
1877 	cmd->key_flags = __cpu_to_le32(arg->key_flags);
1878 	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
1879 	cmd->key_len = __cpu_to_le32(arg->key_len);
1880 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
1881 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
1882 
1883 	if (arg->macaddr)
1884 		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1885 
1886 	ptr += sizeof(*tlv);
1887 	ptr += sizeof(*cmd);
1888 
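	/* The key material follows the fixed command struct as a byte-array
	 * TLV, padded up to a 4-byte boundary.
	 */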
1889 	tlv = ptr;
1890 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1891 	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
1892 	if (arg->key_data)
1893 		memcpy(tlv->value, arg->key_data, arg->key_len);
1894 
1895 	ptr += sizeof(*tlv);
1896 	ptr += roundup(arg->key_len, sizeof(__le32));
1897 
1898 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
1899 	return skb;
1900 }
1901 
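/* Emit a single STA_UAPSD_AUTO_TRIG_PARAM TLV at @ptr and return the
 * pointer advanced past it; the caller must have reserved enough room.
 */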
1902 static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
1903 					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
1904 {
1905 	struct wmi_sta_uapsd_auto_trig_param *ac;
1906 	struct wmi_tlv *tlv;
1907 
1908 	tlv = ptr;
1909 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
1910 	tlv->len = __cpu_to_le16(sizeof(*ac));
1911 	ac = (void *)tlv->value;
1912 
1913 	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
1914 	ac->user_priority = __cpu_to_le32(arg->user_priority);
1915 	ac->service_interval = __cpu_to_le32(arg->service_interval);
1916 	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
1917 	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
1918 
1919 	ath10k_dbg(ar, ATH10K_DBG_WMI,
1920 		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
		   arg->wmm_ac, arg->user_priority, arg->service_interval,
		   arg->suspend_interval, arg->delay_interval);
1923 
1924 	return ptr + sizeof(*tlv) + sizeof(*ac);
1925 }
1926 
1927 static struct sk_buff *
1928 ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
1929 				     const u8 peer_addr[ETH_ALEN],
1930 				     const struct wmi_sta_uapsd_auto_trig_arg *args,
1931 				     u32 num_ac)
1932 {
1933 	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
1934 	struct wmi_sta_uapsd_auto_trig_param *ac;
1935 	struct wmi_tlv *tlv;
1936 	struct sk_buff *skb;
1937 	size_t len;
1938 	size_t ac_tlv_len;
1939 	void *ptr;
1940 	int i;
1941 
1942 	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
1943 	len = sizeof(*tlv) + sizeof(*cmd) +
1944 	      sizeof(*tlv) + ac_tlv_len;
1945 	skb = ath10k_wmi_alloc_skb(ar, len);
1946 	if (!skb)
1947 		return ERR_PTR(-ENOMEM);
1948 
1949 	ptr = (void *)skb->data;
1950 	tlv = ptr;
1951 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
1952 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1953 	cmd = (void *)tlv->value;
1954 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1955 	cmd->num_ac = __cpu_to_le32(num_ac);
1956 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1957 
1958 	ptr += sizeof(*tlv);
1959 	ptr += sizeof(*cmd);
1960 
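	/* The per-AC trigger parameters are carried in a nested ARRAY_STRUCT
	 * TLV, one STA_UAPSD_AUTO_TRIG_PARAM entry per access category.
	 */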
1961 	tlv = ptr;
1962 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1963 	tlv->len = __cpu_to_le16(ac_tlv_len);
1964 	ac = (void *)tlv->value;
1965 
1966 	ptr += sizeof(*tlv);
1967 	for (i = 0; i < num_ac; i++)
1968 		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
1969 
1970 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
1971 	return skb;
1972 }
1973 
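/* Emit one WMM_PARAMS TLV at @ptr and return the pointer advanced past it. */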
1974 static void *ath10k_wmi_tlv_put_wmm(void *ptr,
1975 				    const struct wmi_wmm_params_arg *arg)
1976 {
1977 	struct wmi_wmm_params *wmm;
1978 	struct wmi_tlv *tlv;
1979 
1980 	tlv = ptr;
1981 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
1982 	tlv->len = __cpu_to_le16(sizeof(*wmm));
1983 	wmm = (void *)tlv->value;
1984 	ath10k_wmi_set_wmm_param(wmm, arg);
1985 
1986 	return ptr + sizeof(*tlv) + sizeof(*wmm);
1987 }
1988 
1989 static struct sk_buff *
1990 ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
1991 				    const struct wmi_wmm_params_all_arg *arg)
1992 {
1993 	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
1994 	struct wmi_tlv *tlv;
1995 	struct sk_buff *skb;
1996 	size_t len;
1997 	void *ptr;
1998 
1999 	len = sizeof(*tlv) + sizeof(*cmd);
2000 	skb = ath10k_wmi_alloc_skb(ar, len);
2001 	if (!skb)
2002 		return ERR_PTR(-ENOMEM);
2003 
2004 	ptr = (void *)skb->data;
2005 	tlv = ptr;
2006 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
2007 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2008 	cmd = (void *)tlv->value;
2009 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2010 
2011 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
2012 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
2013 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
2014 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
2015 
2016 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
2017 	return skb;
2018 }
2019 
2020 static struct sk_buff *
2021 ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
2022 				    const struct wmi_sta_keepalive_arg *arg)
2023 {
2024 	struct wmi_tlv_sta_keepalive_cmd *cmd;
2025 	struct wmi_sta_keepalive_arp_resp *arp;
2026 	struct sk_buff *skb;
2027 	struct wmi_tlv *tlv;
2028 	void *ptr;
2029 	size_t len;
2030 
2031 	len = sizeof(*tlv) + sizeof(*cmd) +
2032 	      sizeof(*tlv) + sizeof(*arp);
2033 	skb = ath10k_wmi_alloc_skb(ar, len);
2034 	if (!skb)
2035 		return ERR_PTR(-ENOMEM);
2036 
2037 	ptr = (void *)skb->data;
2038 	tlv = ptr;
2039 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
2040 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2041 	cmd = (void *)tlv->value;
2042 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2043 	cmd->enabled = __cpu_to_le32(arg->enabled);
2044 	cmd->method = __cpu_to_le32(arg->method);
2045 	cmd->interval = __cpu_to_le32(arg->interval);
2046 
2047 	ptr += sizeof(*tlv);
2048 	ptr += sizeof(*cmd);
2049 
2050 	tlv = ptr;
2051 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
2052 	tlv->len = __cpu_to_le16(sizeof(*arp));
2053 	arp = (void *)tlv->value;
2054 
2055 	arp->src_ip4_addr = arg->src_ip4_addr;
2056 	arp->dest_ip4_addr = arg->dest_ip4_addr;
2057 	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
2058 
2059 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
2060 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
2061 	return skb;
2062 }
2063 
2064 static struct sk_buff *
2065 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
2066 				  const u8 peer_addr[ETH_ALEN],
2067 				  enum wmi_peer_type peer_type)
2068 {
2069 	struct wmi_tlv_peer_create_cmd *cmd;
2070 	struct wmi_tlv *tlv;
2071 	struct sk_buff *skb;
2072 
2073 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2074 	if (!skb)
2075 		return ERR_PTR(-ENOMEM);
2076 
2077 	tlv = (void *)skb->data;
2078 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
2079 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2080 	cmd = (void *)tlv->value;
2081 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2082 	cmd->peer_type = __cpu_to_le32(peer_type);
2083 	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
2084 
2085 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
2086 	return skb;
2087 }
2088 
2089 static struct sk_buff *
2090 ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
2091 				  const u8 peer_addr[ETH_ALEN])
2092 {
2093 	struct wmi_peer_delete_cmd *cmd;
2094 	struct wmi_tlv *tlv;
2095 	struct sk_buff *skb;
2096 
2097 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2098 	if (!skb)
2099 		return ERR_PTR(-ENOMEM);
2100 
2101 	tlv = (void *)skb->data;
2102 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
2103 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2104 	cmd = (void *)tlv->value;
2105 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2106 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2107 
2108 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
2109 	return skb;
2110 }
2111 
2112 static struct sk_buff *
2113 ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
2114 				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
2115 {
2116 	struct wmi_peer_flush_tids_cmd *cmd;
2117 	struct wmi_tlv *tlv;
2118 	struct sk_buff *skb;
2119 
2120 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2121 	if (!skb)
2122 		return ERR_PTR(-ENOMEM);
2123 
2124 	tlv = (void *)skb->data;
2125 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
2126 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2127 	cmd = (void *)tlv->value;
2128 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2129 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
2130 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2131 
2132 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
2133 	return skb;
2134 }
2135 
2136 static struct sk_buff *
2137 ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
2138 				     const u8 *peer_addr,
2139 				     enum wmi_peer_param param_id,
2140 				     u32 param_value)
2141 {
2142 	struct wmi_peer_set_param_cmd *cmd;
2143 	struct wmi_tlv *tlv;
2144 	struct sk_buff *skb;
2145 
2146 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2147 	if (!skb)
2148 		return ERR_PTR(-ENOMEM);
2149 
2150 	tlv = (void *)skb->data;
2151 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
2152 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2153 	cmd = (void *)tlv->value;
2154 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2155 	cmd->param_id = __cpu_to_le32(param_id);
2156 	cmd->param_value = __cpu_to_le32(param_value);
2157 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
2158 
2159 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
2160 	return skb;
2161 }
2162 
2163 static struct sk_buff *
2164 ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
2165 				 const struct wmi_peer_assoc_complete_arg *arg)
2166 {
2167 	struct wmi_tlv_peer_assoc_cmd *cmd;
2168 	struct wmi_vht_rate_set *vht_rate;
2169 	struct wmi_tlv *tlv;
2170 	struct sk_buff *skb;
2171 	size_t len, legacy_rate_len, ht_rate_len;
2172 	void *ptr;
2173 
2174 	if (arg->peer_mpdu_density > 16)
2175 		return ERR_PTR(-EINVAL);
2176 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
2177 		return ERR_PTR(-EINVAL);
2178 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
2179 		return ERR_PTR(-EINVAL);
2180 
2181 	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
2182 				  sizeof(__le32));
2183 	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
2184 	len = (sizeof(*tlv) + sizeof(*cmd)) +
2185 	      (sizeof(*tlv) + legacy_rate_len) +
2186 	      (sizeof(*tlv) + ht_rate_len) +
2187 	      (sizeof(*tlv) + sizeof(*vht_rate));
2188 	skb = ath10k_wmi_alloc_skb(ar, len);
2189 	if (!skb)
2190 		return ERR_PTR(-ENOMEM);
2191 
2192 	ptr = (void *)skb->data;
2193 	tlv = ptr;
2194 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
2195 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2196 	cmd = (void *)tlv->value;
2197 
2198 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2199 	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
2200 	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
2201 	cmd->flags = __cpu_to_le32(arg->peer_flags);
2202 	cmd->caps = __cpu_to_le32(arg->peer_caps);
2203 	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
2204 	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
2205 	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
2206 	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
2207 	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
2208 	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
2209 	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
2210 	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
2211 	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
2212 	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
2213 	ether_addr_copy(cmd->mac_addr.addr, arg->addr);
2214 
2215 	ptr += sizeof(*tlv);
2216 	ptr += sizeof(*cmd);
2217 
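	/* The fixed command struct is followed by two byte-array TLVs
	 * (legacy and HT rates, each padded to 4 bytes) and a VHT rate set.
	 */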
2218 	tlv = ptr;
2219 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2220 	tlv->len = __cpu_to_le16(legacy_rate_len);
2221 	memcpy(tlv->value, arg->peer_legacy_rates.rates,
2222 	       arg->peer_legacy_rates.num_rates);
2223 
2224 	ptr += sizeof(*tlv);
2225 	ptr += legacy_rate_len;
2226 
2227 	tlv = ptr;
2228 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2229 	tlv->len = __cpu_to_le16(ht_rate_len);
2230 	memcpy(tlv->value, arg->peer_ht_rates.rates,
2231 	       arg->peer_ht_rates.num_rates);
2232 
2233 	ptr += sizeof(*tlv);
2234 	ptr += ht_rate_len;
2235 
2236 	tlv = ptr;
2237 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
2238 	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
2239 	vht_rate = (void *)tlv->value;
2240 
2241 	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
2242 	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
2243 	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
2244 	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
2245 
2246 	ptr += sizeof(*tlv);
2247 	ptr += sizeof(*vht_rate);
2248 
2249 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
2250 	return skb;
2251 }
2252 
2253 static struct sk_buff *
2254 ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
2255 				 enum wmi_sta_ps_mode psmode)
2256 {
2257 	struct wmi_sta_powersave_mode_cmd *cmd;
2258 	struct wmi_tlv *tlv;
2259 	struct sk_buff *skb;
2260 
2261 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2262 	if (!skb)
2263 		return ERR_PTR(-ENOMEM);
2264 
2265 	tlv = (void *)skb->data;
2266 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
2267 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2268 	cmd = (void *)tlv->value;
2269 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2270 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
2271 
2272 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
2273 	return skb;
2274 }
2275 
2276 static struct sk_buff *
2277 ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
2278 				 enum wmi_sta_powersave_param param_id,
2279 				 u32 param_value)
2280 {
2281 	struct wmi_sta_powersave_param_cmd *cmd;
2282 	struct wmi_tlv *tlv;
2283 	struct sk_buff *skb;
2284 
2285 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2286 	if (!skb)
2287 		return ERR_PTR(-ENOMEM);
2288 
2289 	tlv = (void *)skb->data;
2290 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2291 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2292 	cmd = (void *)tlv->value;
2293 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2294 	cmd->param_id = __cpu_to_le32(param_id);
2295 	cmd->param_value = __cpu_to_le32(param_value);
2296 
2297 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2298 	return skb;
2299 }
2300 
2301 static struct sk_buff *
2302 ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2303 				enum wmi_ap_ps_peer_param param_id, u32 value)
2304 {
2305 	struct wmi_ap_ps_peer_cmd *cmd;
2306 	struct wmi_tlv *tlv;
2307 	struct sk_buff *skb;
2308 
2309 	if (!mac)
2310 		return ERR_PTR(-EINVAL);
2311 
2312 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2313 	if (!skb)
2314 		return ERR_PTR(-ENOMEM);
2315 
2316 	tlv = (void *)skb->data;
2317 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2318 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2319 	cmd = (void *)tlv->value;
2320 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2321 	cmd->param_id = __cpu_to_le32(param_id);
2322 	cmd->param_value = __cpu_to_le32(value);
2323 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2324 
2325 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2326 	return skb;
2327 }
2328 
2329 static struct sk_buff *
2330 ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
2331 				     const struct wmi_scan_chan_list_arg *arg)
2332 {
2333 	struct wmi_tlv_scan_chan_list_cmd *cmd;
2334 	struct wmi_channel *ci;
2335 	struct wmi_channel_arg *ch;
2336 	struct wmi_tlv *tlv;
2337 	struct sk_buff *skb;
2338 	size_t chans_len, len;
2339 	int i;
2340 	void *ptr, *chans;
2341 
2342 	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
2343 	len = (sizeof(*tlv) + sizeof(*cmd)) +
2344 	      (sizeof(*tlv) + chans_len);
2345 
2346 	skb = ath10k_wmi_alloc_skb(ar, len);
2347 	if (!skb)
2348 		return ERR_PTR(-ENOMEM);
2349 
2350 	ptr = (void *)skb->data;
2351 	tlv = ptr;
2352 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
2353 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2354 	cmd = (void *)tlv->value;
2355 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
2356 
2357 	ptr += sizeof(*tlv);
2358 	ptr += sizeof(*cmd);
2359 
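	/* The channel list is packed as a nested ARRAY_STRUCT TLV with one
	 * WMI_TLV_TAG_STRUCT_CHANNEL entry per channel.
	 */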
2360 	tlv = ptr;
2361 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2362 	tlv->len = __cpu_to_le16(chans_len);
2363 	chans = (void *)tlv->value;
2364 
2365 	for (i = 0; i < arg->n_channels; i++) {
2366 		ch = &arg->channels[i];
2367 
2368 		tlv = chans;
2369 		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2370 		tlv->len = __cpu_to_le16(sizeof(*ci));
2371 		ci = (void *)tlv->value;
2372 
2373 		ath10k_wmi_put_wmi_channel(ci, ch);
2374 
2375 		chans += sizeof(*tlv);
2376 		chans += sizeof(*ci);
2377 	}
2378 
2379 	ptr += sizeof(*tlv);
2380 	ptr += chans_len;
2381 
2382 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
2383 	return skb;
2384 }
2385 
2386 static struct sk_buff *
2387 ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2388 				 const void *bcn, size_t bcn_len,
2389 				 u32 bcn_paddr, bool dtim_zero,
2390 				 bool deliver_cab)
2392 {
2393 	struct wmi_bcn_tx_ref_cmd *cmd;
2394 	struct wmi_tlv *tlv;
2395 	struct sk_buff *skb;
2396 	struct ieee80211_hdr *hdr;
2397 	u16 fc;
2398 
2399 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2400 	if (!skb)
2401 		return ERR_PTR(-ENOMEM);
2402 
2403 	hdr = (struct ieee80211_hdr *)bcn;
2404 	fc = le16_to_cpu(hdr->frame_control);
2405 
2406 	tlv = (void *)skb->data;
2407 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2408 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2409 	cmd = (void *)tlv->value;
2410 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2411 	cmd->data_len = __cpu_to_le32(bcn_len);
2412 	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2413 	cmd->msdu_id = 0;
2414 	cmd->frame_control = __cpu_to_le32(fc);
2415 	cmd->flags = 0;
2416 
2417 	if (dtim_zero)
2418 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2419 
2420 	if (deliver_cab)
2421 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2422 
2423 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2424 	return skb;
2425 }
2426 
2427 static struct sk_buff *
2428 ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2429 				   const struct wmi_wmm_params_all_arg *arg)
2430 {
2431 	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2432 	struct wmi_wmm_params *wmm;
2433 	struct wmi_tlv *tlv;
2434 	struct sk_buff *skb;
2435 	size_t len;
2436 	void *ptr;
2437 
2438 	len = (sizeof(*tlv) + sizeof(*cmd)) +
2439 	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
2440 	skb = ath10k_wmi_alloc_skb(ar, len);
2441 	if (!skb)
2442 		return ERR_PTR(-ENOMEM);
2443 
2444 	ptr = (void *)skb->data;
2445 
2446 	tlv = ptr;
2447 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2448 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2449 	cmd = (void *)tlv->value;
2450 
2451 	/* nothing to set here */
2452 
2453 	ptr += sizeof(*tlv);
2454 	ptr += sizeof(*cmd);
2455 
2456 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2457 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2458 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2459 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2460 
2461 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2462 	return skb;
2463 }
2464 
2465 static struct sk_buff *
2466 ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2467 {
2468 	struct wmi_request_stats_cmd *cmd;
2469 	struct wmi_tlv *tlv;
2470 	struct sk_buff *skb;
2471 
2472 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2473 	if (!skb)
2474 		return ERR_PTR(-ENOMEM);
2475 
2476 	tlv = (void *)skb->data;
2477 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2478 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2479 	cmd = (void *)tlv->value;
2480 	cmd->stats_id = __cpu_to_le32(stats_mask);
2481 
2482 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
2483 	return skb;
2484 }
2485 
2486 static struct sk_buff *
2487 ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
2488 {
2489 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
2490 	struct wmi_tlv_mgmt_tx_cmd *cmd;
2491 	struct wmi_tlv *tlv;
2492 	struct ieee80211_hdr *hdr;
2493 	struct sk_buff *skb;
2494 	void *ptr;
2495 	int len;
2496 	u32 buf_len = msdu->len;
2497 	struct ath10k_vif *arvif;
2498 	dma_addr_t mgmt_frame_dma;
2499 	u32 vdev_id;
2500 
2501 	if (!cb->vif)
2502 		return ERR_PTR(-EINVAL);
2503 
2504 	hdr = (struct ieee80211_hdr *)msdu->data;
2505 	arvif = (void *)cb->vif->drv_priv;
2506 	vdev_id = arvif->vdev_id;
2507 
2508 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
2509 		return ERR_PTR(-EINVAL);
2510 
2511 	len = sizeof(*cmd) + 2 * sizeof(*tlv);
2512 
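	/* Reserve extra room for the CCMP MIC on protected robust management
	 * frames (action, deauth, disassoc).
	 */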
2513 	if ((ieee80211_is_action(hdr->frame_control) ||
2514 	     ieee80211_is_deauth(hdr->frame_control) ||
2515 	     ieee80211_is_disassoc(hdr->frame_control)) &&
2516 	     ieee80211_has_protected(hdr->frame_control)) {
2517 		len += IEEE80211_CCMP_MIC_LEN;
2518 		buf_len += IEEE80211_CCMP_MIC_LEN;
2519 	}
2520 
2521 	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
2522 	buf_len = round_up(buf_len, 4);
2523 
2524 	len += buf_len;
2525 	len = round_up(len, 4);
2526 	skb = ath10k_wmi_alloc_skb(ar, len);
2527 	if (!skb)
2528 		return ERR_PTR(-ENOMEM);
2529 
2530 	ptr = (void *)skb->data;
2531 	tlv = ptr;
2532 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
2533 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2534 	cmd = (void *)tlv->value;
2535 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2536 	cmd->desc_id = 0;
2537 	cmd->chanfreq = 0;
2538 	cmd->buf_len = __cpu_to_le32(buf_len);
2539 	cmd->frame_len = __cpu_to_le32(msdu->len);
2540 	mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
2541 					msdu->len, DMA_TO_DEVICE);
	if (dma_mapping_error(arvif->ar->dev, mgmt_frame_dma)) {
		dev_kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}
2544 
2545 	cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
2546 
2547 	ptr += sizeof(*tlv);
2548 	ptr += sizeof(*cmd);
2549 
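	/* The frame payload is also carried inline as a byte-array TLV, in
	 * addition to the DMA reference in the fixed command struct.
	 */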
2550 	tlv = ptr;
2551 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2552 	tlv->len = __cpu_to_le16(buf_len);
2553 
2554 	ptr += sizeof(*tlv);
2555 	memcpy(ptr, msdu->data, buf_len);
2556 
2557 	return skb;
2558 }
2559 
2560 static struct sk_buff *
2561 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
2562 				    enum wmi_force_fw_hang_type type,
2563 				    u32 delay_ms)
2564 {
2565 	struct wmi_force_fw_hang_cmd *cmd;
2566 	struct wmi_tlv *tlv;
2567 	struct sk_buff *skb;
2568 
2569 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2570 	if (!skb)
2571 		return ERR_PTR(-ENOMEM);
2572 
2573 	tlv = (void *)skb->data;
2574 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
2575 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2576 	cmd = (void *)tlv->value;
2577 	cmd->type = __cpu_to_le32(type);
2578 	cmd->delay_ms = __cpu_to_le32(delay_ms);
2579 
2580 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
2581 	return skb;
2582 }
2583 
2584 static struct sk_buff *
2585 ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
				 u32 log_level)
{
2587 	struct wmi_tlv_dbglog_cmd *cmd;
2588 	struct wmi_tlv *tlv;
2589 	struct sk_buff *skb;
2590 	size_t len, bmap_len;
2591 	u32 value;
2592 	void *ptr;
2593 
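	/* With a module bitmap given, enable verbose logging for those
	 * modules; otherwise fall back to WARN level for all modules.
	 */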
2594 	if (module_enable) {
2595 		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2596 				module_enable,
2597 				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
2598 	} else {
2599 		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2600 				WMI_TLV_DBGLOG_ALL_MODULES,
2601 				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
2602 	}
2603 
2604 	bmap_len = 0;
2605 	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
2606 	skb = ath10k_wmi_alloc_skb(ar, len);
2607 	if (!skb)
2608 		return ERR_PTR(-ENOMEM);
2609 
2610 	ptr = (void *)skb->data;
2611 
2612 	tlv = ptr;
2613 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
2614 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2615 	cmd = (void *)tlv->value;
2616 	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
2617 	cmd->value = __cpu_to_le32(value);
2618 
2619 	ptr += sizeof(*tlv);
2620 	ptr += sizeof(*cmd);
2621 
2622 	tlv = ptr;
2623 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2624 	tlv->len = __cpu_to_le16(bmap_len);
2625 
2626 	/* nothing to do here */
2627 
2628 	ptr += sizeof(*tlv);
	ptr += bmap_len; /* empty module bitmap array */
2630 
2631 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
2632 	return skb;
2633 }
2634 
2635 static struct sk_buff *
2636 ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
2637 {
2638 	struct wmi_tlv_pktlog_enable *cmd;
2639 	struct wmi_tlv *tlv;
2640 	struct sk_buff *skb;
2641 	void *ptr;
2642 	size_t len;
2643 
2644 	len = sizeof(*tlv) + sizeof(*cmd);
2645 	skb = ath10k_wmi_alloc_skb(ar, len);
2646 	if (!skb)
2647 		return ERR_PTR(-ENOMEM);
2648 
2649 	ptr = (void *)skb->data;
2650 	tlv = ptr;
2651 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
2652 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2653 	cmd = (void *)tlv->value;
2654 	cmd->filter = __cpu_to_le32(filter);
2655 
2656 	ptr += sizeof(*tlv);
2657 	ptr += sizeof(*cmd);
2658 
2659 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
2660 		   filter);
2661 	return skb;
2662 }
2663 
2664 static struct sk_buff *
2665 ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
2666 {
2667 	struct wmi_tlv_pktlog_disable *cmd;
2668 	struct wmi_tlv *tlv;
2669 	struct sk_buff *skb;
2670 	void *ptr;
2671 	size_t len;
2672 
2673 	len = sizeof(*tlv) + sizeof(*cmd);
2674 	skb = ath10k_wmi_alloc_skb(ar, len);
2675 	if (!skb)
2676 		return ERR_PTR(-ENOMEM);
2677 
2678 	ptr = (void *)skb->data;
2679 	tlv = ptr;
2680 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
2681 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2682 	cmd = (void *)tlv->value;
2683 
2684 	ptr += sizeof(*tlv);
2685 	ptr += sizeof(*cmd);
2686 
2687 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
2688 	return skb;
2689 }
2690 
2691 static struct sk_buff *
2692 ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
2693 			       u32 tim_ie_offset, struct sk_buff *bcn,
2694 			       u32 prb_caps, u32 prb_erp, void *prb_ies,
2695 			       size_t prb_ies_len)
2696 {
2697 	struct wmi_tlv_bcn_tmpl_cmd *cmd;
2698 	struct wmi_tlv_bcn_prb_info *info;
2699 	struct wmi_tlv *tlv;
2700 	struct sk_buff *skb;
2701 	void *ptr;
2702 	size_t len;
2703 
2704 	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
2705 		return ERR_PTR(-EINVAL);
2706 
2707 	len = sizeof(*tlv) + sizeof(*cmd) +
2708 	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
2709 	      sizeof(*tlv) + roundup(bcn->len, 4);
2710 	skb = ath10k_wmi_alloc_skb(ar, len);
2711 	if (!skb)
2712 		return ERR_PTR(-ENOMEM);
2713 
2714 	ptr = (void *)skb->data;
2715 	tlv = ptr;
2716 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
2717 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2718 	cmd = (void *)tlv->value;
2719 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2720 	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
2721 	cmd->buf_len = __cpu_to_le32(bcn->len);
2722 
2723 	ptr += sizeof(*tlv);
2724 	ptr += sizeof(*cmd);
2725 
	/* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
	 * but then it would be impossible to pass the original IE length.
	 * This chunk is not used yet, so if setting the probe response
	 * template causes beaconing problems or firmware crashes, look here.
	 */
2731 	tlv = ptr;
2732 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2733 	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
2734 	info = (void *)tlv->value;
2735 	info->caps = __cpu_to_le32(prb_caps);
2736 	info->erp = __cpu_to_le32(prb_erp);
2737 	memcpy(info->ies, prb_ies, prb_ies_len);
2738 
2739 	ptr += sizeof(*tlv);
2740 	ptr += sizeof(*info);
2741 	ptr += prb_ies_len;
2742 
2743 	tlv = ptr;
2744 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2745 	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
2746 	memcpy(tlv->value, bcn->data, bcn->len);
2747 
2748 	/* FIXME: Adjust TSF? */
2749 
2750 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
2751 		   vdev_id);
2752 	return skb;
2753 }
2754 
2755 static struct sk_buff *
2756 ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
2757 			       struct sk_buff *prb)
2758 {
2759 	struct wmi_tlv_prb_tmpl_cmd *cmd;
2760 	struct wmi_tlv_bcn_prb_info *info;
2761 	struct wmi_tlv *tlv;
2762 	struct sk_buff *skb;
2763 	void *ptr;
2764 	size_t len;
2765 
2766 	len = sizeof(*tlv) + sizeof(*cmd) +
2767 	      sizeof(*tlv) + sizeof(*info) +
2768 	      sizeof(*tlv) + roundup(prb->len, 4);
2769 	skb = ath10k_wmi_alloc_skb(ar, len);
2770 	if (!skb)
2771 		return ERR_PTR(-ENOMEM);
2772 
2773 	ptr = (void *)skb->data;
2774 	tlv = ptr;
2775 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
2776 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2777 	cmd = (void *)tlv->value;
2778 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2779 	cmd->buf_len = __cpu_to_le32(prb->len);
2780 
2781 	ptr += sizeof(*tlv);
2782 	ptr += sizeof(*cmd);
2783 
2784 	tlv = ptr;
2785 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2786 	tlv->len = __cpu_to_le16(sizeof(*info));
2787 	info = (void *)tlv->value;
2788 	info->caps = 0;
2789 	info->erp = 0;
2790 
2791 	ptr += sizeof(*tlv);
2792 	ptr += sizeof(*info);
2793 
2794 	tlv = ptr;
2795 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2796 	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
2797 	memcpy(tlv->value, prb->data, prb->len);
2798 
2799 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
2800 		   vdev_id);
2801 	return skb;
2802 }
2803 
2804 static struct sk_buff *
2805 ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
2806 				    const u8 *p2p_ie)
2807 {
2808 	struct wmi_tlv_p2p_go_bcn_ie *cmd;
2809 	struct wmi_tlv *tlv;
2810 	struct sk_buff *skb;
2811 	void *ptr;
2812 	size_t len;
2813 
2814 	len = sizeof(*tlv) + sizeof(*cmd) +
2815 	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
2816 	skb = ath10k_wmi_alloc_skb(ar, len);
2817 	if (!skb)
2818 		return ERR_PTR(-ENOMEM);
2819 
2820 	ptr = (void *)skb->data;
2821 	tlv = ptr;
2822 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
2823 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2824 	cmd = (void *)tlv->value;
2825 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2826 	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
2827 
2828 	ptr += sizeof(*tlv);
2829 	ptr += sizeof(*cmd);
2830 
2831 	tlv = ptr;
2832 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2833 	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
2834 	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
2835 
2836 	ptr += sizeof(*tlv);
2837 	ptr += roundup(p2p_ie[1] + 2, 4);
2838 
2839 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
2840 		   vdev_id);
2841 	return skb;
2842 }
2843 
2844 static struct sk_buff *
2845 ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
2846 					   enum wmi_tdls_state state)
2847 {
2848 	struct wmi_tdls_set_state_cmd *cmd;
2849 	struct wmi_tlv *tlv;
2850 	struct sk_buff *skb;
2851 	void *ptr;
2852 	size_t len;
	/* Set to a bitmask of options from wmi_tlv_tdls_options;
	 * for now none of them are enabled.
	 */
2856 	u32 options = 0;
2857 
2858 	len = sizeof(*tlv) + sizeof(*cmd);
2859 	skb = ath10k_wmi_alloc_skb(ar, len);
2860 	if (!skb)
2861 		return ERR_PTR(-ENOMEM);
2862 
2863 	ptr = (void *)skb->data;
2864 	tlv = ptr;
2865 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
2866 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2867 
2868 	cmd = (void *)tlv->value;
2869 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2870 	cmd->state = __cpu_to_le32(state);
2871 	cmd->notification_interval_ms = __cpu_to_le32(5000);
2872 	cmd->tx_discovery_threshold = __cpu_to_le32(100);
2873 	cmd->tx_teardown_threshold = __cpu_to_le32(5);
2874 	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
2875 	cmd->rssi_delta = __cpu_to_le32(-20);
2876 	cmd->tdls_options = __cpu_to_le32(options);
2877 	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
2878 	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
2879 	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
2880 	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
2881 	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
2882 
2883 	ptr += sizeof(*tlv);
2884 	ptr += sizeof(*cmd);
2885 
2886 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
2887 		   state, vdev_id);
2888 	return skb;
2889 }
2890 
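/* Translate UAPSD queue flags and the max service period into the WMI TLV
 * TDLS peer QoS bitmap.
 */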
2891 static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
2892 {
2893 	u32 peer_qos = 0;
2894 
2895 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2896 		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
2897 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2898 		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
2899 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2900 		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
2901 	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2902 		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
2903 
2904 	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
2905 
2906 	return peer_qos;
2907 }
2908 
2909 static struct sk_buff *
2910 ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
2911 				       const struct wmi_tdls_peer_update_cmd_arg *arg,
2912 				       const struct wmi_tdls_peer_capab_arg *cap,
2913 				       const struct wmi_channel_arg *chan_arg)
2914 {
2915 	struct wmi_tdls_peer_update_cmd *cmd;
2916 	struct wmi_tdls_peer_capab *peer_cap;
2917 	struct wmi_channel *chan;
2918 	struct wmi_tlv *tlv;
2919 	struct sk_buff *skb;
2920 	u32 peer_qos;
2921 	void *ptr;
2922 	int len;
2923 	int i;
2924 
2925 	len = sizeof(*tlv) + sizeof(*cmd) +
2926 	      sizeof(*tlv) + sizeof(*peer_cap) +
2927 	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
2928 
2929 	skb = ath10k_wmi_alloc_skb(ar, len);
2930 	if (!skb)
2931 		return ERR_PTR(-ENOMEM);
2932 
2933 	ptr = (void *)skb->data;
2934 	tlv = ptr;
2935 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
2936 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2937 
2938 	cmd = (void *)tlv->value;
2939 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
2940 	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
2941 	cmd->peer_state = __cpu_to_le32(arg->peer_state);
2942 
2943 	ptr += sizeof(*tlv);
2944 	ptr += sizeof(*cmd);
2945 
2946 	tlv = ptr;
2947 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
2948 	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
2949 	peer_cap = (void *)tlv->value;
2950 	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
2951 						   cap->peer_max_sp);
2952 	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
2953 	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
2954 	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
2955 	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
2956 	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
2957 	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
2958 	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
2959 
2960 	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
2961 		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
2962 
2963 	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
2964 	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
2965 	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
2966 
2967 	ptr += sizeof(*tlv);
2968 	ptr += sizeof(*peer_cap);
2969 
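	/* The supported peer channels follow as a nested ARRAY_STRUCT TLV,
	 * one WMI channel struct per entry.
	 */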
2970 	tlv = ptr;
2971 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2972 	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
2973 
2974 	ptr += sizeof(*tlv);
2975 
2976 	for (i = 0; i < cap->peer_chan_len; i++) {
2977 		tlv = ptr;
2978 		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2979 		tlv->len = __cpu_to_le16(sizeof(*chan));
2980 		chan = (void *)tlv->value;
2981 		ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
2982 
2983 		ptr += sizeof(*tlv);
2984 		ptr += sizeof(*chan);
2985 	}
2986 
2987 	ath10k_dbg(ar, ATH10K_DBG_WMI,
2988 		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
2989 		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
2990 	return skb;
2991 }
2992 
2993 static struct sk_buff *
2994 ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
2995 {
2996 	struct wmi_tlv_wow_enable_cmd *cmd;
2997 	struct wmi_tlv *tlv;
2998 	struct sk_buff *skb;
2999 	size_t len;
3000 
3001 	len = sizeof(*tlv) + sizeof(*cmd);
3002 	skb = ath10k_wmi_alloc_skb(ar, len);
3003 	if (!skb)
3004 		return ERR_PTR(-ENOMEM);
3005 
3006 	tlv = (struct wmi_tlv *)skb->data;
3007 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
3008 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3009 	cmd = (void *)tlv->value;
3010 
3011 	cmd->enable = __cpu_to_le32(1);
3012 
3013 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
3014 	return skb;
3015 }
3016 
3017 static struct sk_buff *
3018 ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
3019 					   u32 vdev_id,
3020 					   enum wmi_wow_wakeup_event event,
3021 					   u32 enable)
3022 {
3023 	struct wmi_tlv_wow_add_del_event_cmd *cmd;
3024 	struct wmi_tlv *tlv;
3025 	struct sk_buff *skb;
3026 	size_t len;
3027 
3028 	len = sizeof(*tlv) + sizeof(*cmd);
3029 	skb = ath10k_wmi_alloc_skb(ar, len);
3030 	if (!skb)
3031 		return ERR_PTR(-ENOMEM);
3032 
3033 	tlv = (struct wmi_tlv *)skb->data;
3034 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
3035 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3036 	cmd = (void *)tlv->value;
3037 
3038 	cmd->vdev_id = __cpu_to_le32(vdev_id);
3039 	cmd->is_add = __cpu_to_le32(enable);
3040 	cmd->event_bitmap = __cpu_to_le32(1 << event);
3041 
3042 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
3043 		   wow_wakeup_event(event), enable, vdev_id);
3044 	return skb;
3045 }
3046 
3047 static struct sk_buff *
3048 ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
3049 {
3050 	struct wmi_tlv_wow_host_wakeup_ind *cmd;
3051 	struct wmi_tlv *tlv;
3052 	struct sk_buff *skb;
3053 	size_t len;
3054 
3055 	len = sizeof(*tlv) + sizeof(*cmd);
3056 	skb = ath10k_wmi_alloc_skb(ar, len);
3057 	if (!skb)
3058 		return ERR_PTR(-ENOMEM);
3059 
3060 	tlv = (struct wmi_tlv *)skb->data;
3061 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
3062 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3063 	cmd = (void *)tlv->value;
3064 
3065 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
3066 	return skb;
3067 }
3068 
3069 static struct sk_buff *
3070 ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
3071 				      u32 pattern_id, const u8 *pattern,
3072 				      const u8 *bitmask, int pattern_len,
3073 				      int pattern_offset)
3074 {
3075 	struct wmi_tlv_wow_add_pattern_cmd *cmd;
3076 	struct wmi_tlv_wow_bitmap_pattern *bitmap;
3077 	struct wmi_tlv *tlv;
3078 	struct sk_buff *skb;
3079 	void *ptr;
3080 	size_t len;
3081 
3082 	len = sizeof(*tlv) + sizeof(*cmd) +
3083 	      sizeof(*tlv) +			/* array struct */
3084 	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
3085 	      sizeof(*tlv) +			/* empty ipv4 sync */
3086 	      sizeof(*tlv) +			/* empty ipv6 sync */
3087 	      sizeof(*tlv) +			/* empty magic */
3088 	      sizeof(*tlv) +			/* empty info timeout */
3089 	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */
3090 
3091 	skb = ath10k_wmi_alloc_skb(ar, len);
3092 	if (!skb)
3093 		return ERR_PTR(-ENOMEM);
3094 
3095 	/* cmd */
3096 	ptr = (void *)skb->data;
3097 	tlv = ptr;
3098 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
3099 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3100 	cmd = (void *)tlv->value;
3101 
3102 	cmd->vdev_id = __cpu_to_le32(vdev_id);
3103 	cmd->pattern_id = __cpu_to_le32(pattern_id);
3104 	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3105 
3106 	ptr += sizeof(*tlv);
3107 	ptr += sizeof(*cmd);
3108 
3109 	/* bitmap */
3110 	tlv = ptr;
3111 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3112 	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
3113 
3114 	ptr += sizeof(*tlv);
3115 
3116 	tlv = ptr;
3117 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
3118 	tlv->len = __cpu_to_le16(sizeof(*bitmap));
3119 	bitmap = (void *)tlv->value;
3120 
3121 	memcpy(bitmap->patternbuf, pattern, pattern_len);
3122 	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
3123 	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
3124 	bitmap->pattern_len = __cpu_to_le32(pattern_len);
3125 	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
3126 	bitmap->pattern_id = __cpu_to_le32(pattern_id);
3127 
3128 	ptr += sizeof(*tlv);
3129 	ptr += sizeof(*bitmap);
3130 
3131 	/* ipv4 sync */
3132 	tlv = ptr;
3133 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3134 	tlv->len = __cpu_to_le16(0);
3135 
3136 	ptr += sizeof(*tlv);
3137 
3138 	/* ipv6 sync */
3139 	tlv = ptr;
3140 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3141 	tlv->len = __cpu_to_le16(0);
3142 
3143 	ptr += sizeof(*tlv);
3144 
3145 	/* magic */
3146 	tlv = ptr;
3147 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
3148 	tlv->len = __cpu_to_le16(0);
3149 
3150 	ptr += sizeof(*tlv);
3151 
3152 	/* pattern info timeout */
3153 	tlv = ptr;
3154 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3155 	tlv->len = __cpu_to_le16(0);
3156 
3157 	ptr += sizeof(*tlv);
3158 
3159 	/* ratelimit interval */
3160 	tlv = ptr;
3161 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
3162 	tlv->len = __cpu_to_le16(sizeof(u32));
3163 
3164 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
3165 		   vdev_id, pattern_id, pattern_offset);
3166 	return skb;
3167 }
3168 
3169 static struct sk_buff *
3170 ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
3171 				      u32 pattern_id)
3172 {
3173 	struct wmi_tlv_wow_del_pattern_cmd *cmd;
3174 	struct wmi_tlv *tlv;
3175 	struct sk_buff *skb;
3176 	size_t len;
3177 
3178 	len = sizeof(*tlv) + sizeof(*cmd);
3179 	skb = ath10k_wmi_alloc_skb(ar, len);
3180 	if (!skb)
3181 		return ERR_PTR(-ENOMEM);
3182 
3183 	tlv = (struct wmi_tlv *)skb->data;
3184 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
3185 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3186 	cmd = (void *)tlv->value;
3187 
3188 	cmd->vdev_id = __cpu_to_le32(vdev_id);
3189 	cmd->pattern_id = __cpu_to_le32(pattern_id);
3190 	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
3191 
3192 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
3193 		   vdev_id, pattern_id);
3194 	return skb;
3195 }
3196 
3197 static struct sk_buff *
3198 ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
3199 {
3200 	struct wmi_tlv_adaptive_qcs *cmd;
3201 	struct wmi_tlv *tlv;
3202 	struct sk_buff *skb;
3203 	void *ptr;
3204 	size_t len;
3205 
3206 	len = sizeof(*tlv) + sizeof(*cmd);
3207 	skb = ath10k_wmi_alloc_skb(ar, len);
3208 	if (!skb)
3209 		return ERR_PTR(-ENOMEM);
3210 
3211 	ptr = (void *)skb->data;
3212 	tlv = ptr;
3213 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
3214 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3215 	cmd = (void *)tlv->value;
3216 	cmd->enable = __cpu_to_le32(enable ? 1 : 0);
3217 
3218 	ptr += sizeof(*tlv);
3219 	ptr += sizeof(*cmd);
3220 
3221 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
3222 	return skb;
3223 }
3224 
3225 static struct sk_buff *
3226 ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
3227 {
3228 	struct wmi_echo_cmd *cmd;
3229 	struct wmi_tlv *tlv;
3230 	struct sk_buff *skb;
3231 	void *ptr;
3232 	size_t len;
3233 
3234 	len = sizeof(*tlv) + sizeof(*cmd);
3235 	skb = ath10k_wmi_alloc_skb(ar, len);
3236 	if (!skb)
3237 		return ERR_PTR(-ENOMEM);
3238 
3239 	ptr = (void *)skb->data;
3240 	tlv = ptr;
3241 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
3242 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3243 	cmd = (void *)tlv->value;
	cmd->value = __cpu_to_le32(value);
3245 
3246 	ptr += sizeof(*tlv);
3247 	ptr += sizeof(*cmd);
3248 
3249 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
3250 	return skb;
3251 }
3252 
3253 static struct sk_buff *
3254 ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
3255 					 const struct wmi_vdev_spectral_conf_arg *arg)
3256 {
3257 	struct wmi_vdev_spectral_conf_cmd *cmd;
3258 	struct sk_buff *skb;
3259 	struct wmi_tlv *tlv;
3260 	void *ptr;
3261 	size_t len;
3262 
3263 	len = sizeof(*tlv) + sizeof(*cmd);
3264 	skb = ath10k_wmi_alloc_skb(ar, len);
3265 	if (!skb)
3266 		return ERR_PTR(-ENOMEM);
3267 
3268 	ptr = (void *)skb->data;
3269 	tlv = ptr;
3270 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
3271 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3272 	cmd = (void *)tlv->value;
3273 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
3274 	cmd->scan_count = __cpu_to_le32(arg->scan_count);
3275 	cmd->scan_period = __cpu_to_le32(arg->scan_period);
3276 	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
3277 	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
3278 	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
3279 	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
3280 	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
3281 	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
3282 	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
3283 	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
3284 	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
3285 	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
3286 	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
3287 	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
3288 	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
3289 	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
3290 	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
3291 	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
3292 
3293 	return skb;
3294 }
3295 
3296 static struct sk_buff *
3297 ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
3298 					   u32 trigger, u32 enable)
3299 {
3300 	struct wmi_vdev_spectral_enable_cmd *cmd;
3301 	struct sk_buff *skb;
3302 	struct wmi_tlv *tlv;
3303 	void *ptr;
3304 	size_t len;
3305 
3306 	len = sizeof(*tlv) + sizeof(*cmd);
3307 	skb = ath10k_wmi_alloc_skb(ar, len);
3308 	if (!skb)
3309 		return ERR_PTR(-ENOMEM);
3310 
3311 	ptr = (void *)skb->data;
3312 	tlv = ptr;
3313 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
3314 	tlv->len = __cpu_to_le16(sizeof(*cmd));
3315 	cmd = (void *)tlv->value;
3316 	cmd->vdev_id = __cpu_to_le32(vdev_id);
3317 	cmd->trigger_cmd = __cpu_to_le32(trigger);
3318 	cmd->enable_cmd = __cpu_to_le32(enable);
3319 
3320 	return skb;
3321 }
3322 
3323 /****************/
3324 /* TLV mappings */
3325 /****************/
3326 
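/* Map the abstract WMI command IDs used by the core driver onto the
 * TLV-specific command IDs; commands the TLV interface does not implement
 * are marked unsupported.
 */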
3327 static struct wmi_cmd_map wmi_tlv_cmd_map = {
3328 	.init_cmdid = WMI_TLV_INIT_CMDID,
3329 	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
3330 	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
3331 	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
3332 	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
3333 	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
3334 	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
3335 	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
3336 	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
3337 	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
3338 	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
3339 	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
3340 	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
3341 	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
3342 	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
3343 	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
3344 	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
3345 	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
3346 	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
3347 	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
3348 	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
3349 	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
3350 	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
3351 	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
3352 	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
3353 	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
3354 	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
3355 	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
3356 	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
3357 	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
3358 	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
3359 	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
3360 	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
3361 	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
3362 	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
3363 	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
3364 	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
3365 	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
3366 	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
3367 	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
3368 	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
3369 	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
3370 	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
3371 	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
3372 	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
3373 	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
3374 	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
3375 	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
3376 	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
3377 	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
3378 	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
3379 	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
3380 	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
3381 	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
3382 	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
3383 	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
3384 	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
3385 	.roam_scan_rssi_change_threshold =
3386 				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
3387 	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
3388 	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
3389 	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
3390 	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
3391 	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
3392 	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
3393 	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
3394 	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
3395 	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
3396 	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
3397 	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
3398 	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
3399 	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
3400 	.wlan_profile_set_hist_intvl_cmdid =
3401 				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
3402 	.wlan_profile_get_profile_data_cmdid =
3403 				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
	.echo_cmdid = WMI_TLV_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};

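/*
 * Mapping from the abstract pdev parameter IDs used by the ath10k core to
 * the identifiers understood by TLV firmware.  Entries set to an
 * *_UNSUPPORTED value have no TLV counterpart.  Callers normally reach
 * this table indirectly through ar->wmi.pdev_param; an illustrative
 * sketch (not code taken from this file) would be:
 *
 *	u32 param = ar->wmi.pdev_param->txpower_limit2g;
 *
 *	if (param != WMI_PDEV_PARAM_UNSUPPORTED)
 *		ath10k_wmi_pdev_set_param(ar, param, limit);
 */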
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_TLV_PDEV_PARAM_DCS,
	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
};

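/*
 * Mapping from the abstract vdev parameter IDs to their TLV firmware
 * equivalents; *_UNSUPPORTED entries have no TLV counterpart.  A typical
 * caller goes through the wmi-ops wrappers, roughly (illustrative sketch,
 * not code from this file):
 *
 *	ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
 *				  ar->wmi.vdev_param->dtim_period, value);
 */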
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};

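/*
 * WMI operations table for TLV firmware.  The ath10k core does not call
 * these functions directly; it dispatches through the inline wrappers in
 * wmi-ops.h via ar->wmi.ops, which check for a NULL op and typically
 * return -EOPNOTSUPP for the ops left unimplemented below.
 */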
static const struct wmi_ops wmi_tlv_ops = {
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,

	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	.gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	/* .gen_pdev_set_quiet_mode not implemented */
	/* .gen_pdev_get_temperature not implemented */
	/* .gen_addba_clear_resp not implemented */
	/* .gen_addba_send not implemented */
	/* .gen_addba_set_resp not implemented */
	/* .gen_delba_send not implemented */
	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
};

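/*
 * Peer flag bits as defined by TLV firmware.  The core references these
 * through ar->wmi.peer_flags so the same peer association code can drive
 * firmware generations that use different bit layouts.
 */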
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
	.auth = WMI_TLV_PEER_AUTH,
	.qos = WMI_TLV_PEER_QOS,
	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_TLV_PEER_APSD,
	.ht = WMI_TLV_PEER_HT,
	.bw40 = WMI_TLV_PEER_40MHZ,
	.stbc = WMI_TLV_PEER_STBC,
	.ldbc = WMI_TLV_PEER_LDPC,
	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
	.vht = WMI_TLV_PEER_VHT,
	.bw80 = WMI_TLV_PEER_80MHZ,
	.pmf = WMI_TLV_PEER_PMF,
	.bw160 = WMI_TLV_PEER_160MHZ,
};

/************/
/* TLV init */
/************/

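/*
 * Install the TLV command map, parameter maps and ops on the ath10k
 * instance.  This is expected to be called once from the generic WMI
 * attach path when the firmware advertises the TLV WMI op version,
 * roughly along these lines (illustrative sketch, not code from this
 * file):
 *
 *	case ATH10K_FW_WMI_OP_VERSION_TLV:
 *		ath10k_wmi_tlv_attach(ar);
 *		break;
 */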
void ath10k_wmi_tlv_attach(struct ath10k *ar)
{
	ar->wmi.cmd = &wmi_tlv_cmd_map;
	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
	ar->wmi.ops = &wmi_tlv_ops;
	ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
}