1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 #include "core.h"
18 #include "debug.h"
19 #include "hw.h"
20 #include "wmi.h"
21 #include "wmi-ops.h"
22 #include "wmi-tlv.h"
23 
24 /***************/
25 /* TLV helpers */
26 /***************/
27 
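/*
 * Per-tag minimum payload lengths. The TLV iterator below checks each
 * incoming TLV against this table so that the event parsers can safely
 * dereference the fixed part of the corresponding structure.
 */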
28 struct wmi_tlv_policy {
29 	size_t min_len;
30 };
31 
32 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
33 	[WMI_TLV_TAG_ARRAY_BYTE]
34 		= { .min_len = sizeof(u8) },
35 	[WMI_TLV_TAG_ARRAY_UINT32]
36 		= { .min_len = sizeof(u32) },
37 	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
38 		= { .min_len = sizeof(struct wmi_scan_event) },
39 	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
40 		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
41 	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
42 		= { .min_len = sizeof(struct wmi_chan_info_event) },
43 	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
44 		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
45 	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
46 		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
47 	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
48 		= { .min_len = sizeof(struct wmi_host_swba_event) },
49 	[WMI_TLV_TAG_STRUCT_TIM_INFO]
50 		= { .min_len = sizeof(struct wmi_tim_info) },
51 	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
52 		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
53 	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
54 		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
55 	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
56 		= { .min_len = sizeof(struct hal_reg_capabilities) },
57 	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
58 		= { .min_len = sizeof(struct wlan_host_mem_req) },
59 	[WMI_TLV_TAG_STRUCT_READY_EVENT]
60 		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
61 	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
62 		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
63 	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
64 		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
65 };
66 
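/*
 * Walk a buffer of TLVs: each element is a struct wmi_tlv header (16-bit
 * tag and 16-bit length) followed by tlv_len bytes of payload. The header
 * and payload are validated against the remaining buffer length and the
 * policy table above before the callback is invoked for the element.
 */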
67 static int
68 ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
69 		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
70 				const void *ptr, void *data),
71 		    void *data)
72 {
73 	const void *begin = ptr;
74 	const struct wmi_tlv *tlv;
75 	u16 tlv_tag, tlv_len;
76 	int ret;
77 
78 	while (len > 0) {
79 		if (len < sizeof(*tlv)) {
80 			ath10k_dbg(ar, ATH10K_DBG_WMI,
81 				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
82 				   ptr - begin, len, sizeof(*tlv));
83 			return -EINVAL;
84 		}
85 
86 		tlv = ptr;
87 		tlv_tag = __le16_to_cpu(tlv->tag);
88 		tlv_len = __le16_to_cpu(tlv->len);
89 		ptr += sizeof(*tlv);
90 		len -= sizeof(*tlv);
91 
92 		if (tlv_len > len) {
93 			ath10k_dbg(ar, ATH10K_DBG_WMI,
94 				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
95 				   tlv_tag, ptr - begin, len, tlv_len);
96 			return -EINVAL;
97 		}
98 
99 		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
100 		    wmi_tlv_policies[tlv_tag].min_len &&
101 		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
102 			ath10k_dbg(ar, ATH10K_DBG_WMI,
103 				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
104 				   tlv_tag, ptr - begin, tlv_len,
105 				   wmi_tlv_policies[tlv_tag].min_len);
106 			return -EINVAL;
107 		}
108 
109 		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
110 		if (ret)
111 			return ret;
112 
113 		ptr += tlv_len;
114 		len -= tlv_len;
115 	}
116 
117 	return 0;
118 }
119 
120 static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
121 				     const void *ptr, void *data)
122 {
123 	const void **tb = data;
124 
125 	if (tag < WMI_TLV_TAG_MAX)
126 		tb[tag] = ptr;
127 
128 	return 0;
129 }
130 
131 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
132 				const void *ptr, size_t len)
133 {
134 	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
135 				   (void *)tb);
136 }
137 
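/*
 * Parse a TLV buffer into a freshly allocated table indexed by tag. If a
 * tag occurs more than once only the last occurrence is kept. The caller
 * must kfree() the returned table.
 */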
138 static const void **
139 ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
140 			   size_t len, gfp_t gfp)
141 {
142 	const void **tb;
143 	int ret;
144 
145 	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
146 	if (!tb)
147 		return ERR_PTR(-ENOMEM);
148 
149 	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
150 	if (ret) {
151 		kfree(tb);
152 		return ERR_PTR(ret);
153 	}
154 
155 	return tb;
156 }
157 
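/*
 * Given a pointer to a TLV payload (as stored in the tag table), step back
 * over the preceding struct wmi_tlv header to recover the payload length.
 */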
158 static u16 ath10k_wmi_tlv_len(const void *ptr)
159 {
160 	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
161 }
162 
163 /**************/
164 /* TLV events */
165 /**************/
166 static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
167 					      struct sk_buff *skb)
168 {
169 	const void **tb;
170 	const struct wmi_tlv_bcn_tx_status_ev *ev;
171 	u32 vdev_id, tx_status;
172 	int ret;
173 
174 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
175 	if (IS_ERR(tb)) {
176 		ret = PTR_ERR(tb);
177 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
178 		return ret;
179 	}
180 
181 	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
182 	if (!ev) {
183 		kfree(tb);
184 		return -EPROTO;
185 	}
186 
187 	tx_status = __le32_to_cpu(ev->tx_status);
188 	vdev_id = __le32_to_cpu(ev->vdev_id);
189 
190 	switch (tx_status) {
191 	case WMI_TLV_BCN_TX_STATUS_OK:
192 		break;
193 	case WMI_TLV_BCN_TX_STATUS_XRETRY:
194 	case WMI_TLV_BCN_TX_STATUS_DROP:
195 	case WMI_TLV_BCN_TX_STATUS_FILTERED:
196 		/* FIXME: It's probably worth telling mac80211 to stop the
197 		 * interface as it is crippled.
198 		 */
199 		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d\n",
200 			    vdev_id, tx_status);
201 		break;
202 	}
203 
204 	kfree(tb);
205 	return 0;
206 }
207 
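/*
 * The diag data container event carries ev->num_items diag items packed
 * into a single ARRAY_BYTE TLV. Each item is a wmi_tlv_diag_item header
 * followed by its payload, padded to a 4 byte boundary.
 */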
208 static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
209 					  struct sk_buff *skb)
210 {
211 	const void **tb;
212 	const struct wmi_tlv_diag_data_ev *ev;
213 	const struct wmi_tlv_diag_item *item;
214 	const void *data;
215 	int ret, num_items, len;
216 
217 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
218 	if (IS_ERR(tb)) {
219 		ret = PTR_ERR(tb);
220 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
221 		return ret;
222 	}
223 
224 	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
225 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
226 	if (!ev || !data) {
227 		kfree(tb);
228 		return -EPROTO;
229 	}
230 
231 	num_items = __le32_to_cpu(ev->num_items);
232 	len = ath10k_wmi_tlv_len(data);
233 
234 	while (num_items--) {
235 		if (len == 0)
236 			break;
237 		if (len < sizeof(*item)) {
238 			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
239 			break;
240 		}
241 
242 		item = data;
243 
244 		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
245 			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
246 			break;
247 		}
248 
249 		trace_ath10k_wmi_diag_container(ar,
250 						item->type,
251 						__le32_to_cpu(item->timestamp),
252 						__le32_to_cpu(item->code),
253 						__le16_to_cpu(item->len),
254 						item->payload);
255 
256 		len -= sizeof(*item);
257 		len -= roundup(__le16_to_cpu(item->len), 4);
258 
259 		data += sizeof(*item);
260 		data += roundup(__le16_to_cpu(item->len), 4);
261 	}
262 
263 	if (num_items != -1 || len != 0)
264 		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
265 			    num_items, len);
266 
267 	kfree(tb);
268 	return 0;
269 }
270 
271 static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
272 				     struct sk_buff *skb)
273 {
274 	const void **tb;
275 	const void *data;
276 	int ret, len;
277 
278 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
279 	if (IS_ERR(tb)) {
280 		ret = PTR_ERR(tb);
281 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
282 		return ret;
283 	}
284 
285 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
286 	if (!data) {
287 		kfree(tb);
288 		return -EPROTO;
289 	}
290 	len = ath10k_wmi_tlv_len(data);
291 
292 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
293 	trace_ath10k_wmi_diag(ar, data, len);
294 
295 	kfree(tb);
296 	return 0;
297 }
298 
299 /***********/
300 /* TLV ops */
301 /***********/
302 
303 static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
304 {
305 	struct wmi_cmd_hdr *cmd_hdr;
306 	enum wmi_tlv_event_id id;
307 
308 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
309 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
310 
311 	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) {
		dev_kfree_skb(skb);
312 		return;
	}
313 
314 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
315 
316 	switch (id) {
317 	case WMI_TLV_MGMT_RX_EVENTID:
318 		ath10k_wmi_event_mgmt_rx(ar, skb);
319 		/* mgmt_rx() owns the skb now! */
320 		return;
321 	case WMI_TLV_SCAN_EVENTID:
322 		ath10k_wmi_event_scan(ar, skb);
323 		break;
324 	case WMI_TLV_CHAN_INFO_EVENTID:
325 		ath10k_wmi_event_chan_info(ar, skb);
326 		break;
327 	case WMI_TLV_ECHO_EVENTID:
328 		ath10k_wmi_event_echo(ar, skb);
329 		break;
330 	case WMI_TLV_DEBUG_MESG_EVENTID:
331 		ath10k_wmi_event_debug_mesg(ar, skb);
332 		break;
333 	case WMI_TLV_UPDATE_STATS_EVENTID:
334 		ath10k_wmi_event_update_stats(ar, skb);
335 		break;
336 	case WMI_TLV_VDEV_START_RESP_EVENTID:
337 		ath10k_wmi_event_vdev_start_resp(ar, skb);
338 		break;
339 	case WMI_TLV_VDEV_STOPPED_EVENTID:
340 		ath10k_wmi_event_vdev_stopped(ar, skb);
341 		break;
342 	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
343 		ath10k_wmi_event_peer_sta_kickout(ar, skb);
344 		break;
345 	case WMI_TLV_HOST_SWBA_EVENTID:
346 		ath10k_wmi_event_host_swba(ar, skb);
347 		break;
348 	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
349 		ath10k_wmi_event_tbttoffset_update(ar, skb);
350 		break;
351 	case WMI_TLV_PHYERR_EVENTID:
352 		ath10k_wmi_event_phyerr(ar, skb);
353 		break;
354 	case WMI_TLV_ROAM_EVENTID:
355 		ath10k_wmi_event_roam(ar, skb);
356 		break;
357 	case WMI_TLV_PROFILE_MATCH:
358 		ath10k_wmi_event_profile_match(ar, skb);
359 		break;
360 	case WMI_TLV_DEBUG_PRINT_EVENTID:
361 		ath10k_wmi_event_debug_print(ar, skb);
362 		break;
363 	case WMI_TLV_PDEV_QVIT_EVENTID:
364 		ath10k_wmi_event_pdev_qvit(ar, skb);
365 		break;
366 	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
367 		ath10k_wmi_event_wlan_profile_data(ar, skb);
368 		break;
369 	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
370 		ath10k_wmi_event_rtt_measurement_report(ar, skb);
371 		break;
372 	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
373 		ath10k_wmi_event_tsf_measurement_report(ar, skb);
374 		break;
375 	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
376 		ath10k_wmi_event_rtt_error_report(ar, skb);
377 		break;
378 	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
379 		ath10k_wmi_event_wow_wakeup_host(ar, skb);
380 		break;
381 	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
382 		ath10k_wmi_event_dcs_interference(ar, skb);
383 		break;
384 	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
385 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
386 		break;
387 	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
388 		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
389 		break;
390 	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
391 		ath10k_wmi_event_gtk_offload_status(ar, skb);
392 		break;
393 	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
394 		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
395 		break;
396 	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
397 		ath10k_wmi_event_delba_complete(ar, skb);
398 		break;
399 	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
400 		ath10k_wmi_event_addba_complete(ar, skb);
401 		break;
402 	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
403 		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
404 		break;
405 	case WMI_TLV_SERVICE_READY_EVENTID:
406 		ath10k_wmi_event_service_ready(ar, skb);
407 		break;
408 	case WMI_TLV_READY_EVENTID:
409 		ath10k_wmi_event_ready(ar, skb);
410 		break;
411 	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
412 		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
413 		break;
414 	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
415 		ath10k_wmi_tlv_event_diag_data(ar, skb);
416 		break;
417 	case WMI_TLV_DIAG_EVENTID:
418 		ath10k_wmi_tlv_event_diag(ar, skb);
419 		break;
420 	default:
421 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
422 		break;
423 	}
424 
425 	dev_kfree_skb(skb);
426 }
427 
428 static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
429 					  struct sk_buff *skb,
430 					  struct wmi_scan_ev_arg *arg)
431 {
432 	const void **tb;
433 	const struct wmi_scan_event *ev;
434 	int ret;
435 
436 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
437 	if (IS_ERR(tb)) {
438 		ret = PTR_ERR(tb);
439 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
440 		return ret;
441 	}
442 
443 	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
444 	if (!ev) {
445 		kfree(tb);
446 		return -EPROTO;
447 	}
448 
449 	arg->event_type = ev->event_type;
450 	arg->reason = ev->reason;
451 	arg->channel_freq = ev->channel_freq;
452 	arg->scan_req_id = ev->scan_req_id;
453 	arg->scan_id = ev->scan_id;
454 	arg->vdev_id = ev->vdev_id;
455 
456 	kfree(tb);
457 	return 0;
458 }
459 
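/*
 * The management frame is delivered as a separate ARRAY_BYTE TLV after the
 * MGMT_RX_HDR TLV. The skb is rewritten in place below so that the common
 * mgmt rx path sees only the raw 802.11 frame.
 */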
460 static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
461 					     struct sk_buff *skb,
462 					     struct wmi_mgmt_rx_ev_arg *arg)
463 {
464 	const void **tb;
465 	const struct wmi_tlv_mgmt_rx_ev *ev;
466 	const u8 *frame;
467 	u32 msdu_len;
468 	int ret;
469 
470 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
471 	if (IS_ERR(tb)) {
472 		ret = PTR_ERR(tb);
473 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
474 		return ret;
475 	}
476 
477 	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
478 	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
479 
480 	if (!ev || !frame) {
481 		kfree(tb);
482 		return -EPROTO;
483 	}
484 
485 	arg->channel = ev->channel;
486 	arg->buf_len = ev->buf_len;
487 	arg->status = ev->status;
488 	arg->snr = ev->snr;
489 	arg->phy_mode = ev->phy_mode;
490 	arg->rate = ev->rate;
491 
492 	msdu_len = __le32_to_cpu(arg->buf_len);
493 
494 	if (skb->len < (frame - skb->data) + msdu_len) {
495 		kfree(tb);
496 		return -EPROTO;
497 	}
498 
499 	/* shift the sk_buff to point to `frame` */
500 	skb_trim(skb, 0);
501 	skb_put(skb, frame - skb->data);
502 	skb_pull(skb, frame - skb->data);
503 	skb_put(skb, msdu_len);
504 
505 	kfree(tb);
506 	return 0;
507 }
508 
509 static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
510 					     struct sk_buff *skb,
511 					     struct wmi_ch_info_ev_arg *arg)
512 {
513 	const void **tb;
514 	const struct wmi_chan_info_event *ev;
515 	int ret;
516 
517 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
518 	if (IS_ERR(tb)) {
519 		ret = PTR_ERR(tb);
520 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
521 		return ret;
522 	}
523 
524 	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
525 	if (!ev) {
526 		kfree(tb);
527 		return -EPROTO;
528 	}
529 
530 	arg->err_code = ev->err_code;
531 	arg->freq = ev->freq;
532 	arg->cmd_flags = ev->cmd_flags;
533 	arg->noise_floor = ev->noise_floor;
534 	arg->rx_clear_count = ev->rx_clear_count;
535 	arg->cycle_count = ev->cycle_count;
536 
537 	kfree(tb);
538 	return 0;
539 }
540 
541 static int
542 ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
543 				     struct wmi_vdev_start_ev_arg *arg)
544 {
545 	const void **tb;
546 	const struct wmi_vdev_start_response_event *ev;
547 	int ret;
548 
549 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
550 	if (IS_ERR(tb)) {
551 		ret = PTR_ERR(tb);
552 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
553 		return ret;
554 	}
555 
556 	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
557 	if (!ev) {
558 		kfree(tb);
559 		return -EPROTO;
560 	}
561 
562 	skb_pull(skb, sizeof(*ev));
563 	arg->vdev_id = ev->vdev_id;
564 	arg->req_id = ev->req_id;
565 	arg->resp_type = ev->resp_type;
566 	arg->status = ev->status;
567 
568 	kfree(tb);
569 	return 0;
570 }
571 
572 static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
573 					       struct sk_buff *skb,
574 					       struct wmi_peer_kick_ev_arg *arg)
575 {
576 	const void **tb;
577 	const struct wmi_peer_sta_kickout_event *ev;
578 	int ret;
579 
580 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
581 	if (IS_ERR(tb)) {
582 		ret = PTR_ERR(tb);
583 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
584 		return ret;
585 	}
586 
587 	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
588 	if (!ev) {
589 		kfree(tb);
590 		return -EPROTO;
591 	}
592 
593 	arg->mac_addr = ev->peer_macaddr.addr;
594 
595 	kfree(tb);
596 	return 0;
597 }
598 
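/*
 * The host SWBA event is a fixed HOST_SWBA_EVENT TLV followed by two
 * ARRAY_STRUCT TLVs: first the per-vdev TIM info entries, then the
 * per-vdev P2P NOA info entries. The tim_done/noa_done flags track which
 * of the two arrays the outer parser is currently descending into.
 */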
599 struct wmi_tlv_swba_parse {
600 	const struct wmi_host_swba_event *ev;
601 	bool tim_done;
602 	bool noa_done;
603 	size_t n_tim;
604 	size_t n_noa;
605 	struct wmi_swba_ev_arg *arg;
606 };
607 
608 static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
609 					 const void *ptr, void *data)
610 {
611 	struct wmi_tlv_swba_parse *swba = data;
612 
613 	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
614 		return -EPROTO;
615 
616 	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
617 		return -ENOBUFS;
618 
619 	swba->arg->tim_info[swba->n_tim++] = ptr;
620 	return 0;
621 }
622 
623 static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
624 					 const void *ptr, void *data)
625 {
626 	struct wmi_tlv_swba_parse *swba = data;
627 
628 	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
629 		return -EPROTO;
630 
631 	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
632 		return -ENOBUFS;
633 
634 	swba->arg->noa_info[swba->n_noa++] = ptr;
635 	return 0;
636 }
637 
638 static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
639 				     const void *ptr, void *data)
640 {
641 	struct wmi_tlv_swba_parse *swba = data;
642 	int ret;
643 
644 	switch (tag) {
645 	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
646 		swba->ev = ptr;
647 		break;
648 	case WMI_TLV_TAG_ARRAY_STRUCT:
649 		if (!swba->tim_done) {
650 			swba->tim_done = true;
651 			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
652 						  ath10k_wmi_tlv_swba_tim_parse,
653 						  swba);
654 			if (ret)
655 				return ret;
656 		} else if (!swba->noa_done) {
657 			swba->noa_done = true;
658 			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
659 						  ath10k_wmi_tlv_swba_noa_parse,
660 						  swba);
661 			if (ret)
662 				return ret;
663 		}
664 		break;
665 	default:
666 		break;
667 	}
668 	return 0;
669 }
670 
671 static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
672 					  struct sk_buff *skb,
673 					  struct wmi_swba_ev_arg *arg)
674 {
675 	struct wmi_tlv_swba_parse swba = { .arg = arg };
676 	u32 map;
677 	size_t n_vdevs;
678 	int ret;
679 
680 	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
681 				  ath10k_wmi_tlv_swba_parse, &swba);
682 	if (ret) {
683 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
684 		return ret;
685 	}
686 
687 	if (!swba.ev)
688 		return -EPROTO;
689 
690 	arg->vdev_map = swba.ev->vdev_map;
691 
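	/* Each bit set in vdev_map must be matched by exactly one TIM info
	 * and one NOA info entry parsed above.
	 */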
692 	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
693 		if (map & BIT(0))
694 			n_vdevs++;
695 
696 	if (n_vdevs != swba.n_tim ||
697 	    n_vdevs != swba.n_noa)
698 		return -EPROTO;
699 
700 	return 0;
701 }
702 
703 static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
704 					    struct sk_buff *skb,
705 					    struct wmi_phyerr_ev_arg *arg)
706 {
707 	const void **tb;
708 	const struct wmi_tlv_phyerr_ev *ev;
709 	const void *phyerrs;
710 	int ret;
711 
712 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
713 	if (IS_ERR(tb)) {
714 		ret = PTR_ERR(tb);
715 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
716 		return ret;
717 	}
718 
719 	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
720 	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
721 
722 	if (!ev || !phyerrs) {
723 		kfree(tb);
724 		return -EPROTO;
725 	}
726 
727 	arg->num_phyerrs  = ev->num_phyerrs;
728 	arg->tsf_l32 = ev->tsf_l32;
729 	arg->tsf_u32 = ev->tsf_u32;
730 	arg->buf_len = ev->buf_len;
731 	arg->phyerrs = phyerrs;
732 
733 	kfree(tb);
734 	return 0;
735 }
736 
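/*
 * ABI version words advertised in the init command and checked against the
 * service ready event. VER0 packs the major number into the top 8 bits and
 * the minor number into the low 24 bits; the NS0-NS3 words are opaque
 * firmware namespace identifiers.
 */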
737 #define WMI_TLV_ABI_VER_NS0 0x5F414351
738 #define WMI_TLV_ABI_VER_NS1 0x00004C4D
739 #define WMI_TLV_ABI_VER_NS2 0x00000000
740 #define WMI_TLV_ABI_VER_NS3 0x00000000
741 
742 #define WMI_TLV_ABI_VER0_MAJOR 1
743 #define WMI_TLV_ABI_VER0_MINOR 0
744 #define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
745 			  (((WMI_TLV_ABI_VER0_MINOR) <<  0) & 0x00FFFFFF))
746 #define WMI_TLV_ABI_VER1 53
747 
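/*
 * Collect WLAN_HOST_MEM_REQ entries from the nested ARRAY_STRUCT TLV into
 * the first free slots of arg->mem_reqs. Returns -ENOMEM once the table in
 * the arg structure is full.
 */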
748 static int
749 ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
750 			      const void *ptr, void *data)
751 {
752 	struct wmi_svc_rdy_ev_arg *arg = data;
753 	int i;
754 
755 	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
756 		return -EPROTO;
757 
758 	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
759 		if (!arg->mem_reqs[i]) {
760 			arg->mem_reqs[i] = ptr;
761 			return 0;
762 		}
763 	}
764 
765 	return -ENOMEM;
766 }
767 
768 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
769 					     struct sk_buff *skb,
770 					     struct wmi_svc_rdy_ev_arg *arg)
771 {
772 	const void **tb;
773 	const struct hal_reg_capabilities *reg;
774 	const struct wmi_tlv_svc_rdy_ev *ev;
775 	const __le32 *svc_bmap;
776 	const struct wlan_host_mem_req *mem_reqs;
777 	int ret;
778 
779 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
780 	if (IS_ERR(tb)) {
781 		ret = PTR_ERR(tb);
782 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
783 		return ret;
784 	}
785 
786 	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
787 	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
788 	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
789 	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
790 
791 	if (!ev || !reg || !svc_bmap || !mem_reqs) {
792 		kfree(tb);
793 		return -EPROTO;
794 	}
795 
796 	/* This is an internal ABI compatibility check for WMI TLV so check it
797 	 * here instead of the generic WMI code.
798 	 */
799 	ath10k_dbg(ar, ATH10K_DBG_WMI,
800 		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
801 		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
802 		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
803 		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
804 		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
805 		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
806 
807 	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
808 	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
809 	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
810 	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
811 	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
812 		kfree(tb);
813 		return -ENOTSUPP;
814 	}
815 
816 	arg->min_tx_power = ev->hw_min_tx_power;
817 	arg->max_tx_power = ev->hw_max_tx_power;
818 	arg->ht_cap = ev->ht_cap_info;
819 	arg->vht_cap = ev->vht_cap_info;
820 	arg->sw_ver0 = ev->abi.abi_ver0;
821 	arg->sw_ver1 = ev->abi.abi_ver1;
822 	arg->fw_build = ev->fw_build_vers;
823 	arg->phy_capab = ev->phy_capability;
824 	arg->num_rf_chains = ev->num_rf_chains;
825 	arg->eeprom_rd = reg->eeprom_rd;
826 	arg->num_mem_reqs = ev->num_mem_reqs;
827 	arg->service_map = svc_bmap;
828 	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
829 
830 	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
831 				  ath10k_wmi_tlv_parse_mem_reqs, arg);
832 	if (ret) {
833 		kfree(tb);
834 		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
835 		return ret;
836 	}
837 
838 	kfree(tb);
839 	return 0;
840 }
841 
842 static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
843 					 struct sk_buff *skb,
844 					 struct wmi_rdy_ev_arg *arg)
845 {
846 	const void **tb;
847 	const struct wmi_tlv_rdy_ev *ev;
848 	int ret;
849 
850 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
851 	if (IS_ERR(tb)) {
852 		ret = PTR_ERR(tb);
853 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
854 		return ret;
855 	}
856 
857 	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
858 	if (!ev) {
859 		kfree(tb);
860 		return -EPROTO;
861 	}
862 
863 	arg->sw_version = ev->abi.abi_ver0;
864 	arg->abi_version = ev->abi.abi_ver1;
865 	arg->status = ev->status;
866 	arg->mac_addr = ev->mac_addr.addr;
867 
868 	kfree(tb);
869 	return 0;
870 }
871 
872 static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
873 					   struct ath10k_fw_stats_vdev *dst)
874 {
875 	int i;
876 
877 	dst->vdev_id = __le32_to_cpu(src->vdev_id);
878 	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
879 	dst->data_snr = __le32_to_cpu(src->data_snr);
880 	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
881 	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
882 	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
883 	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
884 	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
885 	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
886 
887 	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
888 		dst->num_tx_frames[i] =
889 			__le32_to_cpu(src->num_tx_frames[i]);
890 
891 	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
892 		dst->num_tx_frames_retries[i] =
893 			__le32_to_cpu(src->num_tx_frames_retries[i]);
894 
895 	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
896 		dst->num_tx_frames_failures[i] =
897 			__le32_to_cpu(src->num_tx_frames_failures[i]);
898 
899 	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
900 		dst->tx_rate_history[i] =
901 			__le32_to_cpu(src->tx_rate_history[i]);
902 
903 	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
904 		dst->beacon_rssi_history[i] =
905 			__le32_to_cpu(src->beacon_rssi_history[i]);
906 }
907 
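/*
 * The stats event consists of a fixed STATS_EVENT TLV with the per-type
 * counts and an ARRAY_BYTE TLV holding the packed pdev, vdev and peer
 * stats structures back to back, in that order.
 */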
908 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
909 					   struct sk_buff *skb,
910 					   struct ath10k_fw_stats *stats)
911 {
912 	const void **tb;
913 	const struct wmi_tlv_stats_ev *ev;
914 	const void *data;
915 	u32 num_pdev_stats;
916 	u32 num_vdev_stats;
917 	u32 num_peer_stats;
918 	u32 num_bcnflt_stats;
919 	u32 num_chan_stats;
920 	size_t data_len;
921 	int ret;
922 	int i;
923 
924 	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
925 	if (IS_ERR(tb)) {
926 		ret = PTR_ERR(tb);
927 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
928 		return ret;
929 	}
930 
931 	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
932 	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
933 
934 	if (!ev || !data) {
935 		kfree(tb);
936 		return -EPROTO;
937 	}
938 
939 	data_len = ath10k_wmi_tlv_len(data);
940 	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
941 	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
942 	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
943 	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
944 	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
945 
946 	ath10k_dbg(ar, ATH10K_DBG_WMI,
947 		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
948 		   num_pdev_stats, num_vdev_stats, num_peer_stats,
949 		   num_bcnflt_stats, num_chan_stats);
950 
951 	for (i = 0; i < num_pdev_stats; i++) {
952 		const struct wmi_pdev_stats *src;
953 		struct ath10k_fw_stats_pdev *dst;
954 
955 		src = data;
956 		if (data_len < sizeof(*src)) {
			kfree(tb);
957 			return -EPROTO;
		}
958 
959 		data += sizeof(*src);
960 		data_len -= sizeof(*src);
961 
962 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
963 		if (!dst)
964 			continue;
965 
966 		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
967 		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
968 		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
969 		list_add_tail(&dst->list, &stats->pdevs);
970 	}
971 
972 	for (i = 0; i < num_vdev_stats; i++) {
973 		const struct wmi_tlv_vdev_stats *src;
974 		struct ath10k_fw_stats_vdev *dst;
975 
976 		src = data;
977 		if (data_len < sizeof(*src)) {
			kfree(tb);
978 			return -EPROTO;
		}
979 
980 		data += sizeof(*src);
981 		data_len -= sizeof(*src);
982 
983 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
984 		if (!dst)
985 			continue;
986 
987 		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
988 		list_add_tail(&dst->list, &stats->vdevs);
989 	}
990 
991 	for (i = 0; i < num_peer_stats; i++) {
992 		const struct wmi_10x_peer_stats *src;
993 		struct ath10k_fw_stats_peer *dst;
994 
995 		src = data;
996 		if (data_len < sizeof(*src)) {
			kfree(tb);
997 			return -EPROTO;
		}
998 
999 		data += sizeof(*src);
1000 		data_len -= sizeof(*src);
1001 
1002 		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
1003 		if (!dst)
1004 			continue;
1005 
1006 		ath10k_wmi_pull_peer_stats(&src->old, dst);
1007 		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
1008 		list_add_tail(&dst->list, &stats->peers);
1009 	}
1010 
1011 	kfree(tb);
1012 	return 0;
1013 }
1014 
1015 static struct sk_buff *
1016 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
1017 {
1018 	struct wmi_tlv_pdev_suspend *cmd;
1019 	struct wmi_tlv *tlv;
1020 	struct sk_buff *skb;
1021 
1022 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1023 	if (!skb)
1024 		return ERR_PTR(-ENOMEM);
1025 
1026 	tlv = (void *)skb->data;
1027 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
1028 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1029 	cmd = (void *)tlv->value;
1030 	cmd->opt = __cpu_to_le32(opt);
1031 
1032 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
1033 	return skb;
1034 }
1035 
1036 static struct sk_buff *
1037 ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
1038 {
1039 	struct wmi_tlv_resume_cmd *cmd;
1040 	struct wmi_tlv *tlv;
1041 	struct sk_buff *skb;
1042 
1043 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1044 	if (!skb)
1045 		return ERR_PTR(-ENOMEM);
1046 
1047 	tlv = (void *)skb->data;
1048 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
1049 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1050 	cmd = (void *)tlv->value;
1051 	cmd->reserved = __cpu_to_le32(0);
1052 
1053 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
1054 	return skb;
1055 }
1056 
1057 static struct sk_buff *
1058 ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
1059 				  u16 rd, u16 rd2g, u16 rd5g,
1060 				  u16 ctl2g, u16 ctl5g,
1061 				  enum wmi_dfs_region dfs_reg)
1062 {
1063 	struct wmi_tlv_pdev_set_rd_cmd *cmd;
1064 	struct wmi_tlv *tlv;
1065 	struct sk_buff *skb;
1066 
1067 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1068 	if (!skb)
1069 		return ERR_PTR(-ENOMEM);
1070 
1071 	tlv = (void *)skb->data;
1072 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
1073 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1074 	cmd = (void *)tlv->value;
1075 	cmd->regd = __cpu_to_le32(rd);
1076 	cmd->regd_2ghz = __cpu_to_le32(rd2g);
1077 	cmd->regd_5ghz = __cpu_to_le32(rd5g);
1078 	cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
1079 	cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
1080 
1081 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
1082 	return skb;
1083 }
1084 
1085 static struct sk_buff *
1086 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
1087 				     u32 param_value)
1088 {
1089 	struct wmi_tlv_pdev_set_param_cmd *cmd;
1090 	struct wmi_tlv *tlv;
1091 	struct sk_buff *skb;
1092 
1093 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1094 	if (!skb)
1095 		return ERR_PTR(-ENOMEM);
1096 
1097 	tlv = (void *)skb->data;
1098 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
1099 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1100 	cmd = (void *)tlv->value;
1101 	cmd->param_id = __cpu_to_le32(param_id);
1102 	cmd->param_value = __cpu_to_le32(param_value);
1103 
1104 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
1105 	return skb;
1106 }
1107 
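/*
 * The init command is built as three consecutive TLVs: the INIT_CMD fixed
 * parameters, the RESOURCE_CONFIG block and an ARRAY_STRUCT of host memory
 * chunks describing the buffers handed over to the firmware.
 */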
1108 static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
1109 {
1110 	struct sk_buff *skb;
1111 	struct wmi_tlv *tlv;
1112 	struct wmi_tlv_init_cmd *cmd;
1113 	struct wmi_tlv_resource_config *cfg;
1114 	struct wmi_host_mem_chunks *chunks;
1115 	size_t len, chunks_len;
1116 	void *ptr;
1117 
1118 	chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
1119 	len = (sizeof(*tlv) + sizeof(*cmd)) +
1120 	      (sizeof(*tlv) + sizeof(*cfg)) +
1121 	      (sizeof(*tlv) + chunks_len);
1122 
1123 	skb = ath10k_wmi_alloc_skb(ar, len);
1124 	if (!skb)
1125 		return ERR_PTR(-ENOMEM);
1126 
1127 	ptr = skb->data;
1128 
1129 	tlv = ptr;
1130 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
1131 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1132 	cmd = (void *)tlv->value;
1133 	ptr += sizeof(*tlv);
1134 	ptr += sizeof(*cmd);
1135 
1136 	tlv = ptr;
1137 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
1138 	tlv->len = __cpu_to_le16(sizeof(*cfg));
1139 	cfg = (void *)tlv->value;
1140 	ptr += sizeof(*tlv);
1141 	ptr += sizeof(*cfg);
1142 
1143 	tlv = ptr;
1144 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1145 	tlv->len = __cpu_to_le16(chunks_len);
1146 	chunks = (void *)tlv->value;
1147 
1148 	ptr += sizeof(*tlv);
1149 	ptr += chunks_len;
1150 
1151 	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
1152 	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
1153 	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
1154 	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
1155 	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
1156 	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
1157 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
1158 
1159 	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
1160 	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
1161 
1162 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
1163 		cfg->num_offload_peers = __cpu_to_le32(3);
1164 		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
1165 	} else {
1166 		cfg->num_offload_peers = __cpu_to_le32(0);
1167 		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
1168 	}
1169 
1170 	cfg->num_peer_keys = __cpu_to_le32(2);
1171 	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
1172 	cfg->ast_skid_limit = __cpu_to_le32(0x10);
1173 	cfg->tx_chain_mask = __cpu_to_le32(0x7);
1174 	cfg->rx_chain_mask = __cpu_to_le32(0x7);
1175 	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
1176 	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
1177 	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
1178 	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
1179 	cfg->rx_decap_mode = __cpu_to_le32(1);
1180 	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
1181 	cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
1182 	cfg->roam_offload_max_vdev = __cpu_to_le32(3);
1183 	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
1184 	cfg->num_mcast_groups = __cpu_to_le32(0);
1185 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
1186 	cfg->mcast2ucast_mode = __cpu_to_le32(0);
1187 	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
1188 	cfg->num_wds_entries = __cpu_to_le32(0x20);
1189 	cfg->dma_burst_size = __cpu_to_le32(0);
1190 	cfg->mac_aggr_delim = __cpu_to_le32(0);
1191 	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
1192 	cfg->vow_config = __cpu_to_le32(0);
1193 	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
1194 	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
1195 	cfg->max_frag_entries = __cpu_to_le32(2);
1196 	cfg->num_tdls_vdevs = __cpu_to_le32(1);
1197 	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
1198 	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
1199 	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
1200 	cfg->num_wow_filters = __cpu_to_le32(0x16);
1201 	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
1202 	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
1203 	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
1204 	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
1205 
1206 	ath10k_wmi_put_host_mem_chunks(ar, chunks);
1207 
1208 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
1209 	return skb;
1210 }
1211 
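/*
 * The start scan command is the fixed START_SCAN_CMD TLV followed by the
 * channel list (ARRAY_UINT32), the SSID list, the BSSID list and the extra
 * IE buffer (ARRAY_BYTE, rounded up to 4 bytes).
 */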
1212 static struct sk_buff *
1213 ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
1214 				 const struct wmi_start_scan_arg *arg)
1215 {
1216 	struct wmi_tlv_start_scan_cmd *cmd;
1217 	struct wmi_tlv *tlv;
1218 	struct sk_buff *skb;
1219 	size_t len, chan_len, ssid_len, bssid_len, ie_len;
1220 	__le32 *chans;
1221 	struct wmi_ssid *ssids;
1222 	struct wmi_mac_addr *addrs;
1223 	void *ptr;
1224 	int i, ret;
1225 
1226 	ret = ath10k_wmi_start_scan_verify(arg);
1227 	if (ret)
1228 		return ERR_PTR(ret);
1229 
1230 	chan_len = arg->n_channels * sizeof(__le32);
1231 	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
1232 	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
1233 	ie_len = roundup(arg->ie_len, 4);
1234 	len = (sizeof(*tlv) + sizeof(*cmd)) +
1235 	      (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
1236 	      (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
1237 	      (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
1238 	      (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
1239 
1240 	skb = ath10k_wmi_alloc_skb(ar, len);
1241 	if (!skb)
1242 		return ERR_PTR(-ENOMEM);
1243 
1244 	ptr = (void *)skb->data;
1245 	tlv = ptr;
1246 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
1247 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1248 	cmd = (void *)tlv->value;
1249 
1250 	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
1251 	cmd->burst_duration_ms = __cpu_to_le32(0);
1252 	cmd->num_channels = __cpu_to_le32(arg->n_channels);
1253 	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
1254 	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
1255 	cmd->ie_len = __cpu_to_le32(arg->ie_len);
1256 	cmd->num_probes = __cpu_to_le32(3);
1257 
1258 	/* FIXME: There are some scan flag inconsistencies across firmwares,
1259 	 * e.g. WMI-TLV inverts the logic behind the following flag.
1260 	 */
1261 	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
1262 
1263 	ptr += sizeof(*tlv);
1264 	ptr += sizeof(*cmd);
1265 
1266 	tlv = ptr;
1267 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
1268 	tlv->len = __cpu_to_le16(chan_len);
1269 	chans = (void *)tlv->value;
1270 	for (i = 0; i < arg->n_channels; i++)
1271 		chans[i] = __cpu_to_le32(arg->channels[i]);
1272 
1273 	ptr += sizeof(*tlv);
1274 	ptr += chan_len;
1275 
1276 	tlv = ptr;
1277 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1278 	tlv->len = __cpu_to_le16(ssid_len);
1279 	ssids = (void *)tlv->value;
1280 	for (i = 0; i < arg->n_ssids; i++) {
1281 		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
1282 		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
1283 	}
1284 
1285 	ptr += sizeof(*tlv);
1286 	ptr += ssid_len;
1287 
1288 	tlv = ptr;
1289 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
1290 	tlv->len = __cpu_to_le16(bssid_len);
1291 	addrs = (void *)tlv->value;
1292 	for (i = 0; i < arg->n_bssids; i++)
1293 		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
1294 
1295 	ptr += sizeof(*tlv);
1296 	ptr += bssid_len;
1297 
1298 	tlv = ptr;
1299 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1300 	tlv->len = __cpu_to_le16(ie_len);
1301 	memcpy(tlv->value, arg->ie, arg->ie_len);
1302 
1303 	ptr += sizeof(*tlv);
1304 	ptr += ie_len;
1305 
1306 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
1307 	return skb;
1308 }
1309 
1310 static struct sk_buff *
1311 ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
1312 				const struct wmi_stop_scan_arg *arg)
1313 {
1314 	struct wmi_stop_scan_cmd *cmd;
1315 	struct wmi_tlv *tlv;
1316 	struct sk_buff *skb;
1317 	u32 scan_id;
1318 	u32 req_id;
1319 
1320 	if (arg->req_id > 0xFFF)
1321 		return ERR_PTR(-EINVAL);
1322 	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
1323 		return ERR_PTR(-EINVAL);
1324 
1325 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1326 	if (!skb)
1327 		return ERR_PTR(-ENOMEM);
1328 
1329 	scan_id = arg->u.scan_id;
1330 	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
1331 
1332 	req_id = arg->req_id;
1333 	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
1334 
1335 	tlv = (void *)skb->data;
1336 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
1337 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1338 	cmd = (void *)tlv->value;
1339 	cmd->req_type = __cpu_to_le32(arg->req_type);
1340 	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
1341 	cmd->scan_id = __cpu_to_le32(scan_id);
1342 	cmd->scan_req_id = __cpu_to_le32(req_id);
1343 
1344 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
1345 	return skb;
1346 }
1347 
1348 static struct sk_buff *
1349 ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
1350 				  u32 vdev_id,
1351 				  enum wmi_vdev_type vdev_type,
1352 				  enum wmi_vdev_subtype vdev_subtype,
1353 				  const u8 mac_addr[ETH_ALEN])
1354 {
1355 	struct wmi_vdev_create_cmd *cmd;
1356 	struct wmi_tlv *tlv;
1357 	struct sk_buff *skb;
1358 
1359 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1360 	if (!skb)
1361 		return ERR_PTR(-ENOMEM);
1362 
1363 	tlv = (void *)skb->data;
1364 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
1365 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1366 	cmd = (void *)tlv->value;
1367 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1368 	cmd->vdev_type = __cpu_to_le32(vdev_type);
1369 	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
1370 	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
1371 
1372 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
1373 	return skb;
1374 }
1375 
1376 static struct sk_buff *
1377 ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
1378 {
1379 	struct wmi_vdev_delete_cmd *cmd;
1380 	struct wmi_tlv *tlv;
1381 	struct sk_buff *skb;
1382 
1383 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1384 	if (!skb)
1385 		return ERR_PTR(-ENOMEM);
1386 
1387 	tlv = (void *)skb->data;
1388 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
1389 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1390 	cmd = (void *)tlv->value;
1391 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1392 
1393 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
1394 	return skb;
1395 }
1396 
1397 static struct sk_buff *
1398 ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
1399 				 const struct wmi_vdev_start_request_arg *arg,
1400 				 bool restart)
1401 {
1402 	struct wmi_tlv_vdev_start_cmd *cmd;
1403 	struct wmi_channel *ch;
1404 	struct wmi_p2p_noa_descriptor *noa;
1405 	struct wmi_tlv *tlv;
1406 	struct sk_buff *skb;
1407 	size_t len;
1408 	void *ptr;
1409 	u32 flags = 0;
1410 
1411 	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
1412 		return ERR_PTR(-EINVAL);
1413 	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
1414 		return ERR_PTR(-EINVAL);
1415 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
1416 		return ERR_PTR(-EINVAL);
1417 
1418 	len = (sizeof(*tlv) + sizeof(*cmd)) +
1419 	      (sizeof(*tlv) + sizeof(*ch)) +
1420 	      (sizeof(*tlv) + 0);
1421 	skb = ath10k_wmi_alloc_skb(ar, len);
1422 	if (!skb)
1423 		return ERR_PTR(-ENOMEM);
1424 
1425 	if (arg->hidden_ssid)
1426 		flags |= WMI_VDEV_START_HIDDEN_SSID;
1427 	if (arg->pmf_enabled)
1428 		flags |= WMI_VDEV_START_PMF_ENABLED;
1429 
1430 	ptr = (void *)skb->data;
1431 
1432 	tlv = ptr;
1433 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
1434 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1435 	cmd = (void *)tlv->value;
1436 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1437 	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
1438 	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
1439 	cmd->flags = __cpu_to_le32(flags);
1440 	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
1441 	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
1442 	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
1443 
1444 	if (arg->ssid) {
1445 		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
1446 		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
1447 	}
1448 
1449 	ptr += sizeof(*tlv);
1450 	ptr += sizeof(*cmd);
1451 
1452 	tlv = ptr;
1453 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
1454 	tlv->len = __cpu_to_le16(sizeof(*ch));
1455 	ch = (void *)tlv->value;
1456 	ath10k_wmi_put_wmi_channel(ch, &arg->channel);
1457 
1458 	ptr += sizeof(*tlv);
1459 	ptr += sizeof(*ch);
1460 
1461 	tlv = ptr;
1462 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1463 	tlv->len = __cpu_to_le16(0);
1464 	noa = (void *)tlv->value;
1465 
1466 	/* Note: This is a nested TLV containing:
1467 	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
1468 	 */
1469 
1470 	ptr += sizeof(*tlv);
1471 	ptr += 0;
1472 
1473 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
1474 	return skb;
1475 }
1476 
1477 static struct sk_buff *
1478 ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
1479 {
1480 	struct wmi_vdev_stop_cmd *cmd;
1481 	struct wmi_tlv *tlv;
1482 	struct sk_buff *skb;
1483 
1484 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1485 	if (!skb)
1486 		return ERR_PTR(-ENOMEM);
1487 
1488 	tlv = (void *)skb->data;
1489 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
1490 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1491 	cmd = (void *)tlv->value;
1492 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1493 
1494 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
1495 	return skb;
1496 }
1497 
1498 static struct sk_buff *
1499 ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
1500 			      const u8 *bssid)
1502 {
1503 	struct wmi_vdev_up_cmd *cmd;
1504 	struct wmi_tlv *tlv;
1505 	struct sk_buff *skb;
1506 
1507 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1508 	if (!skb)
1509 		return ERR_PTR(-ENOMEM);
1510 
1511 	tlv = (void *)skb->data;
1512 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
1513 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1514 	cmd = (void *)tlv->value;
1515 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1516 	cmd->vdev_assoc_id = __cpu_to_le32(aid);
1517 	ether_addr_copy(cmd->vdev_bssid.addr, bssid);
1518 
1519 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
1520 	return skb;
1521 }
1522 
1523 static struct sk_buff *
1524 ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
1525 {
1526 	struct wmi_vdev_down_cmd *cmd;
1527 	struct wmi_tlv *tlv;
1528 	struct sk_buff *skb;
1529 
1530 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1531 	if (!skb)
1532 		return ERR_PTR(-ENOMEM);
1533 
1534 	tlv = (void *)skb->data;
1535 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
1536 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1537 	cmd = (void *)tlv->value;
1538 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1539 
1540 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
1541 	return skb;
1542 }
1543 
1544 static struct sk_buff *
1545 ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
1546 				     u32 param_id, u32 param_value)
1547 {
1548 	struct wmi_vdev_set_param_cmd *cmd;
1549 	struct wmi_tlv *tlv;
1550 	struct sk_buff *skb;
1551 
1552 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1553 	if (!skb)
1554 		return ERR_PTR(-ENOMEM);
1555 
1556 	tlv = (void *)skb->data;
1557 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
1558 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1559 	cmd = (void *)tlv->value;
1560 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1561 	cmd->param_id = __cpu_to_le32(param_id);
1562 	cmd->param_value = __cpu_to_le32(param_value);
1563 
1564 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
1565 	return skb;
1566 }
1567 
1568 static struct sk_buff *
1569 ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
1570 				       const struct wmi_vdev_install_key_arg *arg)
1571 {
1572 	struct wmi_vdev_install_key_cmd *cmd;
1573 	struct wmi_tlv *tlv;
1574 	struct sk_buff *skb;
1575 	size_t len;
1576 	void *ptr;
1577 
1578 	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
1579 		return ERR_PTR(-EINVAL);
1580 	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
1581 		return ERR_PTR(-EINVAL);
1582 
1583 	len = sizeof(*tlv) + sizeof(*cmd) +
1584 	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
1585 	skb = ath10k_wmi_alloc_skb(ar, len);
1586 	if (!skb)
1587 		return ERR_PTR(-ENOMEM);
1588 
1589 	ptr = (void *)skb->data;
1590 	tlv = ptr;
1591 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
1592 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1593 	cmd = (void *)tlv->value;
1594 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1595 	cmd->key_idx = __cpu_to_le32(arg->key_idx);
1596 	cmd->key_flags = __cpu_to_le32(arg->key_flags);
1597 	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
1598 	cmd->key_len = __cpu_to_le32(arg->key_len);
1599 	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
1600 	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
1601 
1602 	if (arg->macaddr)
1603 		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
1604 
1605 	ptr += sizeof(*tlv);
1606 	ptr += sizeof(*cmd);
1607 
1608 	tlv = ptr;
1609 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1610 	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
1611 	if (arg->key_data)
1612 		memcpy(tlv->value, arg->key_data, arg->key_len);
1613 
1614 	ptr += sizeof(*tlv);
1615 	ptr += roundup(arg->key_len, sizeof(__le32));
1616 
1617 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
1618 	return skb;
1619 }
1620 
1621 static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
1622 					 const struct wmi_sta_uapsd_auto_trig_arg *arg)
1623 {
1624 	struct wmi_sta_uapsd_auto_trig_param *ac;
1625 	struct wmi_tlv *tlv;
1626 
1627 	tlv = ptr;
1628 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
1629 	tlv->len = __cpu_to_le16(sizeof(*ac));
1630 	ac = (void *)tlv->value;
1631 
1632 	ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
1633 	ac->user_priority = __cpu_to_le32(arg->user_priority);
1634 	ac->service_interval = __cpu_to_le32(arg->service_interval);
1635 	ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
1636 	ac->delay_interval = __cpu_to_le32(arg->delay_interval);
1637 
1638 	ath10k_dbg(ar, ATH10K_DBG_WMI,
1639 		   "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
1640 		   arg->wmm_ac, arg->user_priority, arg->service_interval,
1641 		   arg->suspend_interval, arg->delay_interval);
1642 
1643 	return ptr + sizeof(*tlv) + sizeof(*ac);
1644 }
1645 
1646 static struct sk_buff *
1647 ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
1648 				     const u8 peer_addr[ETH_ALEN],
1649 				     const struct wmi_sta_uapsd_auto_trig_arg *args,
1650 				     u32 num_ac)
1651 {
1652 	struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
1653 	struct wmi_sta_uapsd_auto_trig_param *ac;
1654 	struct wmi_tlv *tlv;
1655 	struct sk_buff *skb;
1656 	size_t len;
1657 	size_t ac_tlv_len;
1658 	void *ptr;
1659 	int i;
1660 
1661 	ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
1662 	len = sizeof(*tlv) + sizeof(*cmd) +
1663 	      sizeof(*tlv) + ac_tlv_len;
1664 	skb = ath10k_wmi_alloc_skb(ar, len);
1665 	if (!skb)
1666 		return ERR_PTR(-ENOMEM);
1667 
1668 	ptr = (void *)skb->data;
1669 	tlv = ptr;
1670 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
1671 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1672 	cmd = (void *)tlv->value;
1673 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1674 	cmd->num_ac = __cpu_to_le32(num_ac);
1675 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1676 
1677 	ptr += sizeof(*tlv);
1678 	ptr += sizeof(*cmd);
1679 
1680 	tlv = ptr;
1681 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
1682 	tlv->len = __cpu_to_le16(ac_tlv_len);
1683 	ac = (void *)tlv->value;
1684 
1685 	ptr += sizeof(*tlv);
1686 	for (i = 0; i < num_ac; i++)
1687 		ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
1688 
1689 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
1690 	return skb;
1691 }
1692 
1693 static void *ath10k_wmi_tlv_put_wmm(void *ptr,
1694 				    const struct wmi_wmm_params_arg *arg)
1695 {
1696 	struct wmi_wmm_params *wmm;
1697 	struct wmi_tlv *tlv;
1698 
1699 	tlv = ptr;
1700 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
1701 	tlv->len = __cpu_to_le16(sizeof(*wmm));
1702 	wmm = (void *)tlv->value;
1703 	ath10k_wmi_set_wmm_param(wmm, arg);
1704 
1705 	return ptr + sizeof(*tlv) + sizeof(*wmm);
1706 }
1707 
1708 static struct sk_buff *
1709 ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
1710 				    const struct wmi_wmm_params_all_arg *arg)
1711 {
1712 	struct wmi_tlv_vdev_set_wmm_cmd *cmd;
1713 	struct wmi_tlv *tlv;
1714 	struct sk_buff *skb;
1715 	size_t len;
1716 	void *ptr;
1717 
1718 	len = sizeof(*tlv) + sizeof(*cmd);
1719 	skb = ath10k_wmi_alloc_skb(ar, len);
1720 	if (!skb)
1721 		return ERR_PTR(-ENOMEM);
1722 
1723 	ptr = (void *)skb->data;
1724 	tlv = ptr;
1725 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
1726 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1727 	cmd = (void *)tlv->value;
1728 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1729 
1730 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
1731 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
1732 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
1733 	ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
1734 
1735 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
1736 	return skb;
1737 }
1738 
1739 static struct sk_buff *
1740 ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
1741 				    const struct wmi_sta_keepalive_arg *arg)
1742 {
1743 	struct wmi_tlv_sta_keepalive_cmd *cmd;
1744 	struct wmi_sta_keepalive_arp_resp *arp;
1745 	struct sk_buff *skb;
1746 	struct wmi_tlv *tlv;
1747 	void *ptr;
1748 	size_t len;
1749 
1750 	len = sizeof(*tlv) + sizeof(*cmd) +
1751 	      sizeof(*tlv) + sizeof(*arp);
1752 	skb = ath10k_wmi_alloc_skb(ar, len);
1753 	if (!skb)
1754 		return ERR_PTR(-ENOMEM);
1755 
1756 	ptr = (void *)skb->data;
1757 	tlv = ptr;
1758 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
1759 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1760 	cmd = (void *)tlv->value;
1761 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1762 	cmd->enabled = __cpu_to_le32(arg->enabled);
1763 	cmd->method = __cpu_to_le32(arg->method);
1764 	cmd->interval = __cpu_to_le32(arg->interval);
1765 
1766 	ptr += sizeof(*tlv);
1767 	ptr += sizeof(*cmd);
1768 
1769 	tlv = ptr;
1770 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
1771 	tlv->len = __cpu_to_le16(sizeof(*arp));
1772 	arp = (void *)tlv->value;
1773 
1774 	arp->src_ip4_addr = arg->src_ip4_addr;
1775 	arp->dest_ip4_addr = arg->dest_ip4_addr;
1776 	ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
1777 
1778 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
1779 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
1780 	return skb;
1781 }
1782 
1783 static struct sk_buff *
1784 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
1785 				  const u8 peer_addr[ETH_ALEN])
1786 {
1787 	struct wmi_tlv_peer_create_cmd *cmd;
1788 	struct wmi_tlv *tlv;
1789 	struct sk_buff *skb;
1790 
1791 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1792 	if (!skb)
1793 		return ERR_PTR(-ENOMEM);
1794 
1795 	tlv = (void *)skb->data;
1796 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
1797 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1798 	cmd = (void *)tlv->value;
1799 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1800 	cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
1801 	ether_addr_copy(cmd->peer_addr.addr, peer_addr);
1802 
1803 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
1804 	return skb;
1805 }
1806 
1807 static struct sk_buff *
1808 ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
1809 				  const u8 peer_addr[ETH_ALEN])
1810 {
1811 	struct wmi_peer_delete_cmd *cmd;
1812 	struct wmi_tlv *tlv;
1813 	struct sk_buff *skb;
1814 
1815 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1816 	if (!skb)
1817 		return ERR_PTR(-ENOMEM);
1818 
1819 	tlv = (void *)skb->data;
1820 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
1821 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1822 	cmd = (void *)tlv->value;
1823 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1824 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1825 
1826 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
1827 	return skb;
1828 }
1829 
1830 static struct sk_buff *
1831 ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
1832 				 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
1833 {
1834 	struct wmi_peer_flush_tids_cmd *cmd;
1835 	struct wmi_tlv *tlv;
1836 	struct sk_buff *skb;
1837 
1838 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1839 	if (!skb)
1840 		return ERR_PTR(-ENOMEM);
1841 
1842 	tlv = (void *)skb->data;
1843 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
1844 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1845 	cmd = (void *)tlv->value;
1846 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1847 	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
1848 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1849 
1850 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
1851 	return skb;
1852 }
1853 
1854 static struct sk_buff *
1855 ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
1856 				     const u8 *peer_addr,
1857 				     enum wmi_peer_param param_id,
1858 				     u32 param_value)
1859 {
1860 	struct wmi_peer_set_param_cmd *cmd;
1861 	struct wmi_tlv *tlv;
1862 	struct sk_buff *skb;
1863 
1864 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1865 	if (!skb)
1866 		return ERR_PTR(-ENOMEM);
1867 
1868 	tlv = (void *)skb->data;
1869 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
1870 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1871 	cmd = (void *)tlv->value;
1872 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1873 	cmd->param_id = __cpu_to_le32(param_id);
1874 	cmd->param_value = __cpu_to_le32(param_value);
1875 	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
1876 
1877 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
1878 	return skb;
1879 }
1880 
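/* The peer assoc command carries four TLVs: the PEER_ASSOC_COMPLETE_CMD
 * parameters, a byte array of legacy rates, a byte array of HT rates (both
 * padded up to a 4 byte boundary) and a VHT rate set structure.
 */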
1881 static struct sk_buff *
1882 ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
1883 				 const struct wmi_peer_assoc_complete_arg *arg)
1884 {
1885 	struct wmi_tlv_peer_assoc_cmd *cmd;
1886 	struct wmi_vht_rate_set *vht_rate;
1887 	struct wmi_tlv *tlv;
1888 	struct sk_buff *skb;
1889 	size_t len, legacy_rate_len, ht_rate_len;
1890 	void *ptr;
1891 
1892 	if (arg->peer_mpdu_density > 16)
1893 		return ERR_PTR(-EINVAL);
1894 	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
1895 		return ERR_PTR(-EINVAL);
1896 	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
1897 		return ERR_PTR(-EINVAL);
1898 
1899 	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
1900 				  sizeof(__le32));
1901 	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
1902 	len = (sizeof(*tlv) + sizeof(*cmd)) +
1903 	      (sizeof(*tlv) + legacy_rate_len) +
1904 	      (sizeof(*tlv) + ht_rate_len) +
1905 	      (sizeof(*tlv) + sizeof(*vht_rate));
1906 	skb = ath10k_wmi_alloc_skb(ar, len);
1907 	if (!skb)
1908 		return ERR_PTR(-ENOMEM);
1909 
1910 	ptr = (void *)skb->data;
1911 	tlv = ptr;
1912 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
1913 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1914 	cmd = (void *)tlv->value;
1915 
1916 	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
1917 	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
1918 	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
1919 	cmd->flags = __cpu_to_le32(arg->peer_flags);
1920 	cmd->caps = __cpu_to_le32(arg->peer_caps);
1921 	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
1922 	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
1923 	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
1924 	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
1925 	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
1926 	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
1927 	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
1928 	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
1929 	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
1930 	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
1931 	ether_addr_copy(cmd->mac_addr.addr, arg->addr);
1932 
1933 	ptr += sizeof(*tlv);
1934 	ptr += sizeof(*cmd);
1935 
1936 	tlv = ptr;
1937 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1938 	tlv->len = __cpu_to_le16(legacy_rate_len);
1939 	memcpy(tlv->value, arg->peer_legacy_rates.rates,
1940 	       arg->peer_legacy_rates.num_rates);
1941 
1942 	ptr += sizeof(*tlv);
1943 	ptr += legacy_rate_len;
1944 
1945 	tlv = ptr;
1946 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
1947 	tlv->len = __cpu_to_le16(ht_rate_len);
1948 	memcpy(tlv->value, arg->peer_ht_rates.rates,
1949 	       arg->peer_ht_rates.num_rates);
1950 
1951 	ptr += sizeof(*tlv);
1952 	ptr += ht_rate_len;
1953 
1954 	tlv = ptr;
1955 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
1956 	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
1957 	vht_rate = (void *)tlv->value;
1958 
1959 	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
1960 	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
1961 	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
1962 	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
1963 
1964 	ptr += sizeof(*tlv);
1965 	ptr += sizeof(*vht_rate);
1966 
1967 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
1968 	return skb;
1969 }
1970 
1971 static struct sk_buff *
1972 ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
1973 				 enum wmi_sta_ps_mode psmode)
1974 {
1975 	struct wmi_sta_powersave_mode_cmd *cmd;
1976 	struct wmi_tlv *tlv;
1977 	struct sk_buff *skb;
1978 
1979 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
1980 	if (!skb)
1981 		return ERR_PTR(-ENOMEM);
1982 
1983 	tlv = (void *)skb->data;
1984 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
1985 	tlv->len = __cpu_to_le16(sizeof(*cmd));
1986 	cmd = (void *)tlv->value;
1987 	cmd->vdev_id = __cpu_to_le32(vdev_id);
1988 	cmd->sta_ps_mode = __cpu_to_le32(psmode);
1989 
1990 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
1991 	return skb;
1992 }
1993 
1994 static struct sk_buff *
1995 ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
1996 				 enum wmi_sta_powersave_param param_id,
1997 				 u32 param_value)
1998 {
1999 	struct wmi_sta_powersave_param_cmd *cmd;
2000 	struct wmi_tlv *tlv;
2001 	struct sk_buff *skb;
2002 
2003 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2004 	if (!skb)
2005 		return ERR_PTR(-ENOMEM);
2006 
2007 	tlv = (void *)skb->data;
2008 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
2009 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2010 	cmd = (void *)tlv->value;
2011 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2012 	cmd->param_id = __cpu_to_le32(param_id);
2013 	cmd->param_value = __cpu_to_le32(param_value);
2014 
2015 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
2016 	return skb;
2017 }
2018 
2019 static struct sk_buff *
2020 ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
2021 				enum wmi_ap_ps_peer_param param_id, u32 value)
2022 {
2023 	struct wmi_ap_ps_peer_cmd *cmd;
2024 	struct wmi_tlv *tlv;
2025 	struct sk_buff *skb;
2026 
2027 	if (!mac)
2028 		return ERR_PTR(-EINVAL);
2029 
	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2031 	if (!skb)
2032 		return ERR_PTR(-ENOMEM);
2033 
2034 	tlv = (void *)skb->data;
2035 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
2036 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2037 	cmd = (void *)tlv->value;
2038 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2039 	cmd->param_id = __cpu_to_le32(param_id);
2040 	cmd->param_value = __cpu_to_le32(value);
2041 	ether_addr_copy(cmd->peer_macaddr.addr, mac);
2042 
2043 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
2044 	return skb;
2045 }
2046 
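/* The channel list is encoded as a SCAN_CHAN_LIST_CMD TLV followed by an
 * ARRAY_STRUCT TLV whose payload is one CHANNEL TLV per channel argument.
 */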
2047 static struct sk_buff *
2048 ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
2049 				     const struct wmi_scan_chan_list_arg *arg)
2050 {
2051 	struct wmi_tlv_scan_chan_list_cmd *cmd;
2052 	struct wmi_channel *ci;
2053 	struct wmi_channel_arg *ch;
2054 	struct wmi_tlv *tlv;
2055 	struct sk_buff *skb;
2056 	size_t chans_len, len;
2057 	int i;
2058 	void *ptr, *chans;
2059 
2060 	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
2061 	len = (sizeof(*tlv) + sizeof(*cmd)) +
2062 	      (sizeof(*tlv) + chans_len);
2063 
2064 	skb = ath10k_wmi_alloc_skb(ar, len);
2065 	if (!skb)
2066 		return ERR_PTR(-ENOMEM);
2067 
2068 	ptr = (void *)skb->data;
2069 	tlv = ptr;
2070 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
2071 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2072 	cmd = (void *)tlv->value;
2073 	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
2074 
2075 	ptr += sizeof(*tlv);
2076 	ptr += sizeof(*cmd);
2077 
2078 	tlv = ptr;
2079 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
2080 	tlv->len = __cpu_to_le16(chans_len);
2081 	chans = (void *)tlv->value;
2082 
2083 	for (i = 0; i < arg->n_channels; i++) {
2084 		ch = &arg->channels[i];
2085 
2086 		tlv = chans;
2087 		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
2088 		tlv->len = __cpu_to_le16(sizeof(*ci));
2089 		ci = (void *)tlv->value;
2090 
2091 		ath10k_wmi_put_wmi_channel(ci, ch);
2092 
2093 		chans += sizeof(*tlv);
2094 		chans += sizeof(*ci);
2095 	}
2096 
2097 	ptr += sizeof(*tlv);
2098 	ptr += chans_len;
2099 
2100 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
2101 	return skb;
2102 }
2103 
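/* Beacons are not copied into the WMI buffer here; the command only carries
 * a reference to the beacon (its DMA address and length) together with the
 * frame control field and the DTIM-zero/deliver-CAB flags.
 */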
2104 static struct sk_buff *
2105 ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
2106 				 const void *bcn, size_t bcn_len,
2107 				 u32 bcn_paddr, bool dtim_zero,
				 bool deliver_cab)
{
2111 	struct wmi_bcn_tx_ref_cmd *cmd;
2112 	struct wmi_tlv *tlv;
2113 	struct sk_buff *skb;
2114 	struct ieee80211_hdr *hdr;
2115 	u16 fc;
2116 
2117 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2118 	if (!skb)
2119 		return ERR_PTR(-ENOMEM);
2120 
2121 	hdr = (struct ieee80211_hdr *)bcn;
2122 	fc = le16_to_cpu(hdr->frame_control);
2123 
2124 	tlv = (void *)skb->data;
2125 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
2126 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2127 	cmd = (void *)tlv->value;
2128 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2129 	cmd->data_len = __cpu_to_le32(bcn_len);
2130 	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
2131 	cmd->msdu_id = 0;
2132 	cmd->frame_control = __cpu_to_le32(fc);
2133 	cmd->flags = 0;
2134 
2135 	if (dtim_zero)
2136 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
2137 
2138 	if (deliver_cab)
2139 		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
2140 
2141 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
2142 	return skb;
2143 }
2144 
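/* The pdev WMM command structure itself carries no fields; the actual
 * parameters follow as four WMM param TLVs, one per access category in
 * BE, BK, VI, VO order.
 */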
2145 static struct sk_buff *
2146 ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
2147 				   const struct wmi_wmm_params_all_arg *arg)
2148 {
2149 	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
2150 	struct wmi_wmm_params *wmm;
2151 	struct wmi_tlv *tlv;
2152 	struct sk_buff *skb;
2153 	size_t len;
2154 	void *ptr;
2155 
2156 	len = (sizeof(*tlv) + sizeof(*cmd)) +
2157 	      (4 * (sizeof(*tlv) + sizeof(*wmm)));
2158 	skb = ath10k_wmi_alloc_skb(ar, len);
2159 	if (!skb)
2160 		return ERR_PTR(-ENOMEM);
2161 
2162 	ptr = (void *)skb->data;
2163 
2164 	tlv = ptr;
2165 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
2166 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2167 	cmd = (void *)tlv->value;
2168 
2169 	/* nothing to set here */
2170 
2171 	ptr += sizeof(*tlv);
2172 	ptr += sizeof(*cmd);
2173 
2174 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
2175 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
2176 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
2177 	ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
2178 
2179 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
2180 	return skb;
2181 }
2182 
2183 static struct sk_buff *
2184 ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
2185 {
2186 	struct wmi_request_stats_cmd *cmd;
2187 	struct wmi_tlv *tlv;
2188 	struct sk_buff *skb;
2189 
2190 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2191 	if (!skb)
2192 		return ERR_PTR(-ENOMEM);
2193 
2194 	tlv = (void *)skb->data;
2195 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
2196 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2197 	cmd = (void *)tlv->value;
2198 	cmd->stats_id = __cpu_to_le32(stats_mask);
2199 
2200 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
2201 	return skb;
2202 }
2203 
2204 static struct sk_buff *
2205 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
2206 				    enum wmi_force_fw_hang_type type,
2207 				    u32 delay_ms)
2208 {
2209 	struct wmi_force_fw_hang_cmd *cmd;
2210 	struct wmi_tlv *tlv;
2211 	struct sk_buff *skb;
2212 
2213 	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
2214 	if (!skb)
2215 		return ERR_PTR(-ENOMEM);
2216 
2217 	tlv = (void *)skb->data;
2218 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
2219 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2220 	cmd = (void *)tlv->value;
2221 	cmd->type = __cpu_to_le32(type);
2222 	cmd->delay_ms = __cpu_to_le32(delay_ms);
2223 
2224 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
2225 	return skb;
2226 }
2227 
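/* Debug log configuration: a non-zero module_enable selects verbose logging
 * for that module, otherwise all modules are set to warning level; the
 * log_level argument is not used here. The module id bitmap TLV is sent
 * empty (bmap_len == 0).
 */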
2228 static struct sk_buff *
2229 ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
				 u32 log_level)
{
2231 	struct wmi_tlv_dbglog_cmd *cmd;
2232 	struct wmi_tlv *tlv;
2233 	struct sk_buff *skb;
2234 	size_t len, bmap_len;
2235 	u32 value;
2236 	void *ptr;
2237 
2238 	if (module_enable) {
2239 		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2240 				module_enable,
2241 				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
2242 	} else {
2243 		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
2244 				WMI_TLV_DBGLOG_ALL_MODULES,
2245 				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
2246 	}
2247 
2248 	bmap_len = 0;
2249 	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
2250 	skb = ath10k_wmi_alloc_skb(ar, len);
2251 	if (!skb)
2252 		return ERR_PTR(-ENOMEM);
2253 
2254 	ptr = (void *)skb->data;
2255 
2256 	tlv = ptr;
2257 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
2258 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2259 	cmd = (void *)tlv->value;
2260 	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
2261 	cmd->value = __cpu_to_le32(value);
2262 
2263 	ptr += sizeof(*tlv);
2264 	ptr += sizeof(*cmd);
2265 
2266 	tlv = ptr;
2267 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
2268 	tlv->len = __cpu_to_le16(bmap_len);
2269 
2270 	/* nothing to do here */
2271 
2272 	ptr += sizeof(*tlv);
	ptr += bmap_len;
2274 
2275 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
2276 	return skb;
2277 }
2278 
2279 static struct sk_buff *
2280 ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
2281 {
2282 	struct wmi_tlv_pktlog_enable *cmd;
2283 	struct wmi_tlv *tlv;
2284 	struct sk_buff *skb;
2285 	void *ptr;
2286 	size_t len;
2287 
2288 	len = sizeof(*tlv) + sizeof(*cmd);
2289 	skb = ath10k_wmi_alloc_skb(ar, len);
2290 	if (!skb)
2291 		return ERR_PTR(-ENOMEM);
2292 
2293 	ptr = (void *)skb->data;
2294 	tlv = ptr;
2295 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
2296 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2297 	cmd = (void *)tlv->value;
2298 	cmd->filter = __cpu_to_le32(filter);
2299 
2300 	ptr += sizeof(*tlv);
2301 	ptr += sizeof(*cmd);
2302 
2303 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
2304 		   filter);
2305 	return skb;
2306 }
2307 
2308 static struct sk_buff *
2309 ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
2310 {
2311 	struct wmi_tlv_pktlog_disable *cmd;
2312 	struct wmi_tlv *tlv;
2313 	struct sk_buff *skb;
2314 	void *ptr;
2315 	size_t len;
2316 
2317 	len = sizeof(*tlv) + sizeof(*cmd);
2318 	skb = ath10k_wmi_alloc_skb(ar, len);
2319 	if (!skb)
2320 		return ERR_PTR(-ENOMEM);
2321 
2322 	ptr = (void *)skb->data;
2323 	tlv = ptr;
2324 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
2325 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2326 	cmd = (void *)tlv->value;
2327 
2328 	ptr += sizeof(*tlv);
2329 	ptr += sizeof(*cmd);
2330 
2331 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
2332 	return skb;
2333 }
2334 
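/* The beacon template is built from three TLVs: the BCN_TMPL_CMD parameters,
 * a probe response info block (capabilities, ERP and optional IEs) and the
 * beacon frame itself as a byte array padded to a 4 byte boundary.
 */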
2335 static struct sk_buff *
2336 ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
2337 			       u32 tim_ie_offset, struct sk_buff *bcn,
2338 			       u32 prb_caps, u32 prb_erp, void *prb_ies,
2339 			       size_t prb_ies_len)
2340 {
2341 	struct wmi_tlv_bcn_tmpl_cmd *cmd;
2342 	struct wmi_tlv_bcn_prb_info *info;
2343 	struct wmi_tlv *tlv;
2344 	struct sk_buff *skb;
2345 	void *ptr;
2346 	size_t len;
2347 
2348 	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
2349 		return ERR_PTR(-EINVAL);
2350 
2351 	len = sizeof(*tlv) + sizeof(*cmd) +
2352 	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
2353 	      sizeof(*tlv) + roundup(bcn->len, 4);
2354 	skb = ath10k_wmi_alloc_skb(ar, len);
2355 	if (!skb)
2356 		return ERR_PTR(-ENOMEM);
2357 
2358 	ptr = (void *)skb->data;
2359 	tlv = ptr;
2360 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
2361 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2362 	cmd = (void *)tlv->value;
2363 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2364 	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
2365 	cmd->buf_len = __cpu_to_le32(bcn->len);
2366 
2367 	ptr += sizeof(*tlv);
2368 	ptr += sizeof(*cmd);
2369 
	/* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary,
	 * but then it becomes impossible to pass the original IE length.
	 * This chunk is not used yet, so if setting the probe response
	 * template yields problems with beaconing or crashes the firmware,
	 * look here.
	 */
2375 	tlv = ptr;
2376 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2377 	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
2378 	info = (void *)tlv->value;
2379 	info->caps = __cpu_to_le32(prb_caps);
2380 	info->erp = __cpu_to_le32(prb_erp);
2381 	memcpy(info->ies, prb_ies, prb_ies_len);
2382 
2383 	ptr += sizeof(*tlv);
2384 	ptr += sizeof(*info);
2385 	ptr += prb_ies_len;
2386 
2387 	tlv = ptr;
2388 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2389 	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
2390 	memcpy(tlv->value, bcn->data, bcn->len);
2391 
2392 	/* FIXME: Adjust TSF? */
2393 
2394 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
2395 		   vdev_id);
2396 	return skb;
2397 }
2398 
2399 static struct sk_buff *
2400 ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
2401 			       struct sk_buff *prb)
2402 {
2403 	struct wmi_tlv_prb_tmpl_cmd *cmd;
2404 	struct wmi_tlv_bcn_prb_info *info;
2405 	struct wmi_tlv *tlv;
2406 	struct sk_buff *skb;
2407 	void *ptr;
2408 	size_t len;
2409 
2410 	len = sizeof(*tlv) + sizeof(*cmd) +
2411 	      sizeof(*tlv) + sizeof(*info) +
2412 	      sizeof(*tlv) + roundup(prb->len, 4);
2413 	skb = ath10k_wmi_alloc_skb(ar, len);
2414 	if (!skb)
2415 		return ERR_PTR(-ENOMEM);
2416 
2417 	ptr = (void *)skb->data;
2418 	tlv = ptr;
2419 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
2420 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2421 	cmd = (void *)tlv->value;
2422 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2423 	cmd->buf_len = __cpu_to_le32(prb->len);
2424 
2425 	ptr += sizeof(*tlv);
2426 	ptr += sizeof(*cmd);
2427 
2428 	tlv = ptr;
2429 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
2430 	tlv->len = __cpu_to_le16(sizeof(*info));
2431 	info = (void *)tlv->value;
2432 	info->caps = 0;
2433 	info->erp = 0;
2434 
2435 	ptr += sizeof(*tlv);
2436 	ptr += sizeof(*info);
2437 
2438 	tlv = ptr;
2439 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2440 	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
2441 	memcpy(tlv->value, prb->data, prb->len);
2442 
2443 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
2444 		   vdev_id);
2445 	return skb;
2446 }
2447 
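/* p2p_ie points at a raw IE, so p2p_ie[1] is the IE payload length and the
 * extra two bytes cover the element ID and length octets; the copy is padded
 * to a 4 byte boundary for the byte array TLV.
 */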
2448 static struct sk_buff *
2449 ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
2450 				    const u8 *p2p_ie)
2451 {
2452 	struct wmi_tlv_p2p_go_bcn_ie *cmd;
2453 	struct wmi_tlv *tlv;
2454 	struct sk_buff *skb;
2455 	void *ptr;
2456 	size_t len;
2457 
2458 	len = sizeof(*tlv) + sizeof(*cmd) +
2459 	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
2460 	skb = ath10k_wmi_alloc_skb(ar, len);
2461 	if (!skb)
2462 		return ERR_PTR(-ENOMEM);
2463 
2464 	ptr = (void *)skb->data;
2465 	tlv = ptr;
2466 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
2467 	tlv->len = __cpu_to_le16(sizeof(*cmd));
2468 	cmd = (void *)tlv->value;
2469 	cmd->vdev_id = __cpu_to_le32(vdev_id);
2470 	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
2471 
2472 	ptr += sizeof(*tlv);
2473 	ptr += sizeof(*cmd);
2474 
2475 	tlv = ptr;
2476 	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
2477 	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
2478 	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
2479 
2480 	ptr += sizeof(*tlv);
2481 	ptr += roundup(p2p_ie[1] + 2, 4);
2482 
2483 	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
2484 		   vdev_id);
2485 	return skb;
2486 }
2487 
2488 /****************/
2489 /* TLV mappings */
2490 /****************/
2491 
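/* These maps translate the abstract command and parameter identifiers used
 * by the ath10k core into their TLV firmware interface counterparts.
 * Entries set to the *_UNSUPPORTED sentinels are presumably rejected by the
 * core before a command is ever generated.
 */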
2492 static struct wmi_cmd_map wmi_tlv_cmd_map = {
2493 	.init_cmdid = WMI_TLV_INIT_CMDID,
2494 	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
2495 	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
2496 	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
2497 	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
2498 	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
2499 	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
2500 	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
2501 	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
2502 	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
2503 	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
2504 	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
2505 	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
2506 	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
2507 	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
2508 	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
2509 	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
2510 	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
2511 	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
2512 	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
2513 	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
2514 	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
2515 	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
2516 	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
2517 	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
2518 	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
2519 	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
2520 	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
2521 	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
2522 	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
2523 	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
2524 	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
2525 	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
2526 	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
2527 	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
2528 	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
2529 	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
2530 	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
2531 	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
2532 	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
2533 	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
2534 	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
2535 	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
2536 	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
2537 	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
2538 	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
2539 	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
2540 	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
2541 	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
2542 	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
2543 	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
2544 	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
2545 	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
2546 	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
2547 	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
2548 	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
2549 	.roam_scan_rssi_change_threshold =
2550 				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
2551 	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
2552 	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
2553 	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
2554 	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
2555 	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
2556 	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
2557 	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
2558 	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
2559 	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
2560 	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
2561 	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
2562 	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
2563 	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
2564 	.wlan_profile_set_hist_intvl_cmdid =
2565 				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
2566 	.wlan_profile_get_profile_data_cmdid =
2567 				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
2568 	.wlan_profile_enable_profile_id_cmdid =
2569 				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
2570 	.wlan_profile_list_profile_id_cmdid =
2571 				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
2572 	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
2573 	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
2574 	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
2575 	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
2576 	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
2577 	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
2578 	.wow_enable_disable_wake_event_cmdid =
2579 				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
2580 	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
2581 	.wow_hostwakeup_from_sleep_cmdid =
2582 				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
2583 	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
2584 	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
2585 	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
2586 	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
2587 	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
2588 	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
2589 	.network_list_offload_config_cmdid =
2590 				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
2591 	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
2592 	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
2593 	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
2594 	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
2595 	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
2596 	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
2597 	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
2598 	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
2599 	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
2600 	.echo_cmdid = WMI_TLV_ECHO_CMDID,
2601 	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
2602 	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
2603 	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
2604 	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
2605 	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
2606 	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
2607 	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
2608 	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
2609 	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
2610 	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
2611 	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
2612 };
2613 
2614 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
2615 	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
2616 	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
2617 	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
2618 	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
2619 	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
2620 	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
2621 	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
2622 	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
2623 	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
2624 	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
2625 	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
2626 	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
2627 	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
2628 	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
2629 	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
2630 	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
2631 	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
2632 	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
2633 	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
2634 	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
2635 	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
2636 	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
2637 	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
2638 	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
2639 	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
2640 	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
2641 	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
2642 	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
2643 	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
2644 	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
2645 	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
2646 	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
2647 	.bcnflt_stats_update_period =
2648 				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
2649 	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
2650 	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
2651 	.dcs = WMI_TLV_PDEV_PARAM_DCS,
2652 	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
2653 	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
2654 	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
2655 	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
2656 	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
2657 	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
2658 	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
2659 	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
2660 	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
2661 	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
2662 	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
2663 	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
2665 };
2666 
2667 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
2668 	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
2669 	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
2670 	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
2671 	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
2672 	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
2673 	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
2674 	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
2675 	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
2676 	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
2677 	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
2678 	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
2679 	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
2680 	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
2681 	.wmi_vdev_oc_scheduler_air_time_limit =
2682 				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
2683 	.wds = WMI_TLV_VDEV_PARAM_WDS,
2684 	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
2685 	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
2686 	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
2687 	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
2688 	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
2689 	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
2690 	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
2691 	.disable_htprotection =	WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
2692 	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
2693 	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
2694 	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
2695 	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
2696 	.sgi = WMI_TLV_VDEV_PARAM_SGI,
2697 	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
2698 	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
2699 	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
2700 	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
2701 	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
2702 	.nss = WMI_TLV_VDEV_PARAM_NSS,
2703 	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
2704 	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
2705 	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
2706 	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
2707 	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
2708 	.ap_keepalive_min_idle_inactive_time_secs =
2709 		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
2710 	.ap_keepalive_max_idle_inactive_time_secs =
2711 		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
2712 	.ap_keepalive_max_unresponsive_time_secs =
2713 		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
2714 	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
2715 	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
2716 	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
2717 	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
2718 	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
2719 	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
2720 	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
2721 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
2722 					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
2723 };
2724 
2725 static const struct wmi_ops wmi_tlv_ops = {
2726 	.rx = ath10k_wmi_tlv_op_rx,
2727 	.map_svc = wmi_tlv_svc_map,
2728 
2729 	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
2730 	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
2731 	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
2732 	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
2733 	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
2734 	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
2735 	.pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
2736 	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
2737 	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
2738 	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
2739 
2740 	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
2741 	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
2742 	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
2743 	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
2744 	.gen_init = ath10k_wmi_tlv_op_gen_init,
2745 	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
2746 	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
2747 	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
2748 	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
2749 	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
2750 	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
2751 	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
2752 	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
2753 	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
2754 	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
2755 	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
2756 	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
2757 	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
2758 	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
2759 	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
2760 	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
2761 	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
2762 	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
2763 	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
2764 	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
2765 	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
2766 	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
2767 	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
2768 	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
2769 	/* .gen_mgmt_tx = not implemented; HTT is used */
2770 	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
2771 	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
2772 	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
2773 	/* .gen_pdev_set_quiet_mode not implemented */
2774 	/* .gen_pdev_get_temperature not implemented */
2775 	/* .gen_addba_clear_resp not implemented */
2776 	/* .gen_addba_send not implemented */
2777 	/* .gen_addba_set_resp not implemented */
2778 	/* .gen_delba_send not implemented */
2779 	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
2780 	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
2781 	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
2782 	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
2783 	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
2784 };
2785 
2786 /************/
2787 /* TLV init */
2788 /************/
2789 
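/* Wire up the TLV maps and ops for this device. This is expected to be
 * called once during WMI setup, before any command is generated; a caller
 * would roughly do (illustrative only, the actual check and call site live
 * elsewhere in the driver):
 *
 *	if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_TLV)
 *		ath10k_wmi_tlv_attach(ar);
 */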
2790 void ath10k_wmi_tlv_attach(struct ath10k *ar)
2791 {
2792 	ar->wmi.cmd = &wmi_tlv_cmd_map;
2793 	ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
2794 	ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
2795 	ar->wmi.ops = &wmi_tlv_ops;
2796 }
2797