/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

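/*
 * struct wmi_ops - WMI backend operations
 *
 * ath10k firmware speaks one of several WMI ABI revisions (main, 10.x, TLV),
 * each with its own command layout. A backend fills in this table: rx()
 * dispatches incoming WMI events, map_svc() translates the firmware service
 * bitmap, the pull_* callbacks parse event payloads into ABI-independent
 * argument structures and the gen_* callbacks build command buffers.
 * Callbacks that a given ABI does not support are left NULL and the inline
 * wrappers below report -EOPNOTSUPP for them.
 */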
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

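	/* pull_* ops parse firmware events into ABI-independent arg structs */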
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

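	/* gen_* ops build ABI-specific command buffers; the caller sends them */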
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};
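
/*
 * Backends install themselves during WMI attach by pointing ar->wmi.ops at
 * their operation table and ar->wmi.cmd at the matching command-id map (see
 * wmi.c and wmi-tlv.c). The rest of the driver uses only the inline wrappers
 * below, which dispatch through those pointers.
 */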

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

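/*
 * Every wrapper follows the same pattern: return -EOPNOTSUPP when the active
 * backend lacks the op, otherwise generate the command skb and hand it to
 * ath10k_wmi_cmd_send() together with the backend's command id. A rough
 * caller-side sketch (variable names illustrative only):
 *
 *	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, WMI_VDEV_TYPE_STA,
 *				     WMI_VDEV_SUBTYPE_NONE, vif->addr);
 *	if (ret)
 *		ath10k_warn(ar, "failed to create WMI vdev: %d\n", ret);
 */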
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

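	/* The nowait send path is not expected to consume the skb on
	 * failure, so release it here if submission fails.
	 */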
	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

#endif /* _WMI_OPS_H_ */