/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
			u16 rd5g, u16 ctl2g, u16 ctl5g,
			enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
			u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
			const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
			const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
			enum wmi_vdev_type type,
			enum wmi_vdev_subtype subtype,
			const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg,
			bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
			const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
			u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
			const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
			const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
			u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
			const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
			const u8 peer_addr[ETH_ALEN],
			enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
			const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
			const u8 peer_addr[ETH_ALEN],
			u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
			const u8 *peer_addr,
			enum wmi_peer_param param_id,
			u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
			const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
			enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
			enum wmi_sta_powersave_param param_id,
			u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
			const u8 *mac,
			enum wmi_ap_ps_peer_param param_id,
			u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
			const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
			u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
			const void *bcn, size_t bcn_len,
			u32 bcn_paddr, bool dtim_zero,
			bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
			const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
			enum wmi_force_fw_hang_type type,
			u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
			struct sk_buff *skb,
			dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
			u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
			u32 period, u32 duration,
			u32 next_offset,
			u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
			const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
			const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
			const u8 *mac, u32 tid,
			u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
			const u8 *mac, u32 tid, u32 initiator,
			u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
			u32 tim_ie_offset, struct sk_buff *bcn,
			u32 prb_caps, u32 prb_erp,
			void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
			struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
			const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
			const u8 peer_addr[ETH_ALEN],
			const struct wmi_sta_uapsd_auto_trig_arg *args,
			u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
			const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
			enum wmi_wow_wakeup_event event,
			u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
			u32 pattern_id,
			const u8 *pattern,
			const u8 *mask,
			int pattern_len,
			int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
			u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
			u32 vdev_id,
			enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
			const struct wmi_tdls_peer_update_cmd_arg *arg,
			const struct wmi_tdls_peer_capab_arg *cap,
			const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)(struct ath10k *ar,
			const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
			u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			struct ath10k_fw_stats *fw_stats,
			char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
			u8 enable,
			u32 detect_level,
			u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
			enum wmi_host_platform_type type,
			u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
			enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
			u32 vdev_id,
			struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)(struct ath10k *ar,
			enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
			u32 param);

};
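
/*
 * Each WMI ABI variant provides its own filled-in struct wmi_ops (the real
 * tables live in the WMI backend sources, e.g. wmi.c and wmi-tlv.c); the
 * inline wrappers below dispatch through ar->wmi.ops and return -EOPNOTSUPP
 * for hooks that are left NULL. A minimal illustrative sketch of such a
 * table, using hypothetical backend function names:
 *
 *	static const struct wmi_ops wmi_foo_ops = {
 *		.rx = ath10k_wmi_foo_op_rx,
 *		.map_svc = wmi_foo_map_svc,
 *		.pull_scan = ath10k_wmi_foo_op_pull_scan_ev,
 *		.gen_pdev_set_param = ath10k_wmi_foo_op_gen_pdev_set_param,
 *		... hooks a backend does not implement simply stay NULL ...
 *	};
 */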

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
		struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
		struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
		dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}
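
/*
 * Two management-TX paths are exposed: ath10k_wmi_mgmt_tx_send() above is
 * for backends that implement gen_mgmt_tx_send and take the frame by DMA
 * address (hence the paddr argument), while ath10k_wmi_mgmt_tx() below
 * hands the frame to gen_mgmt_tx, which typically embeds it in the command
 * buffer itself. A hedged caller-side sketch, assuming msdu (and, for the
 * first path, its DMA mapping paddr) have already been prepared by the
 * caller:
 *
 *	if (ar->wmi.ops->gen_mgmt_tx_send)
 *		ret = ath10k_wmi_mgmt_tx_send(ar, msdu, paddr);
 *	else
 *		ret = ath10k_wmi_mgmt_tx(ar, msdu);
 */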

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
		u16 ctl2g, u16 ctl5g,
		enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
			dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		enum wmi_vdev_type type,
		enum wmi_vdev_subtype subtype,
		const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
		const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
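
/*
 * The vdev wrappers above are normally used as a bring-up/tear-down pair:
 * a vdev is created, started on a channel and then marked "up" with its AID
 * and BSSID, and the reverse order is used when taking it down. A rough
 * sketch of that sequence (error handling omitted, arguments assumed to be
 * prepared by the caller):
 *
 *	ath10k_wmi_vdev_create(ar, vdev_id, type, subtype, macaddr);
 *	ath10k_wmi_vdev_start(ar, &start_arg);
 *	ath10k_wmi_vdev_up(ar, vdev_id, aid, bssid);
 *	...
 *	ath10k_wmi_vdev_down(ar, vdev_id);
 *	ath10k_wmi_vdev_stop(ar, vdev_id);
 *	ath10k_wmi_vdev_delete(ar, vdev_id);
 */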

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
		u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
			param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
		const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
		const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
		u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
			enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		const struct wmi_sta_uapsd_auto_trig_arg *args,
		u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
			num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
		const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN],
		enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
		enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
			param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
		enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
		const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	/* Pack the first three octets of the MAC address into a 24-bit OUI,
	 * e.g. 00:03:7f:xx:xx:xx becomes 0x00037f.
	 */
	prob_req_oui = (((u32)mac_addr[0]) << 16) |
			(((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
		const void *bcn, size_t bcn_len,
		u32 bcn_paddr, bool dtim_zero,
		bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
			dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
			ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
		const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
		enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
		u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
			next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
			reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
			prb_caps, prb_erp, prb_ies,
			prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
		const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
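
/*
 * The WoW wrappers are expected to be used as a sequence around suspend:
 * wakeup events and patterns are programmed per vdev first, then WoW is
 * enabled; on resume a host-wakeup indication is sent before normal
 * operation resumes. A hedged sketch of that flow, with event and pattern
 * values assumed to be chosen by the caller:
 *
 *	ath10k_wmi_wow_add_wakeup_event(ar, vdev_id, event, 1);
 *	ath10k_wmi_wow_add_pattern(ar, vdev_id, pattern_id, pattern, mask,
 *			pattern_len, pattern_offset);
 *	ath10k_wmi_wow_enable(ar);
 *	...	(system suspended)
 *	ath10k_wmi_wow_host_wakeup_ind(ar);
 */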

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
		enum wmi_wow_wakeup_event event,
		u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
		const u8 *pattern, const u8 *mask,
		int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
			pattern, mask, pattern_len,
			pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
		struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
		enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
		const struct wmi_tdls_peer_update_cmd_arg *arg,
		const struct wmi_tdls_peer_capab_arg *cap,
		const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
		char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
		u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
			detect_level,
			detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
		enum wmi_host_platform_type type,
		u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
			fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
		enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
		const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
			ar->wmi.cmd->radar_found_cmdid);
}

#endif