/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

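/*
 * Per-ABI WMI backend vtable. ->rx dispatches received WMI events, the
 * map_svc/map_svc_ext hooks translate the firmware service bitmap, the
 * pull_*() hooks parse event payloads into host argument structures and
 * the gen_*() hooks build command skbs for ath10k_wmi_cmd_send().
 */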
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_mgmt_tx_bundle_compl)(
				struct ath10k *ar, struct sk_buff *skb,
				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
						     const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
					      u32 vdev_id,
					      struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
	struct sk_buff *(*gen_bb_timing)
			(struct ath10k *ar,
			 const struct wmi_bb_timing_cfg_arg *arg);

};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

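/*
 * Inline wrappers around the ops table. Each wrapper returns -EOPNOTSUPP
 * when the active WMI ABI does not implement the corresponding hook; the
 * gen_*()-based wrappers build the command buffer and hand it to
 * ath10k_wmi_cmd_send() with the per-ABI command id.
 */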
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
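	/* Unlike ath10k_wmi_cmd_send(), the _nowait variant is not expected
	 * to consume the skb on failure, so it is freed explicitly here.
	 */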
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
			  struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
			  const struct wmi_bb_timing_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bb_timing)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bb_timing(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->set_bb_timing_cmdid);
}
#endif /* _WMI_OPS_H_ */