/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

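/* Per-ABI WMI operation table. Each supported WMI flavour (e.g. the main,
 * 10.x and TLV firmware branches) provides its own instance: the pull_*
 * handlers parse received event buffers into host-endian *_ev_arg
 * structures, the gen_* handlers build command buffers ready to be sent to
 * the firmware, and rx()/map_svc() dispatch incoming events and translate
 * service bitmaps. Handlers that a given ABI does not implement are left
 * NULL.
 */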
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

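/* The inline wrappers below hide the ABI differences from the rest of the
 * driver: each one returns -EOPNOTSUPP when the active WMI implementation
 * does not provide the corresponding handler. Command wrappers build the
 * command skb with a gen_* op and hand it to ath10k_wmi_cmd_send() together
 * with the ABI-specific command id from ar->wmi.cmd.
 */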
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

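/* Event parsing wrappers: each pull_* helper converts a raw firmware event
 * skb into the corresponding host-endian *_ev_arg structure so the event
 * handlers in the core code stay ABI-agnostic.
 */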
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

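/* The beacon is passed to the firmware by reference (DMA address and length
 * only) and queued with ath10k_wmi_cmd_send_nowait(); if the send fails the
 * locally built command skb has not been consumed, so it is freed here.
 */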
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

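/* Wake-on-WLAN wrappers: program wakeup events and match patterns per vdev,
 * arm WoW with wow_enable before suspend and notify the firmware of host
 * wakeup on resume.
 */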
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

#endif