/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/*
 * Per-firmware-ABI WMI dispatch table.  Each WMI backend (main WMI and
 * WMI-TLV) provides its own implementation of these ops, and ar->wmi.ops is
 * pointed at the matching table when WMI is attached.  The pull_* ops parse
 * received WMI events into ABI-neutral *_ev_arg structures, the gen_* ops
 * build command skbs for ath10k_wmi_cmd_send(), and any op left NULL makes
 * the corresponding wrapper below return -EOPNOTSUPP.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
					      u32 vdev_id,
					      struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
	struct sk_buff *(*gen_bb_timing)
			(struct ath10k *ar,
			 const struct wmi_bb_timing_cfg_arg *arg);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
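/*
 * Illustrative sketch (not part of this header): each WMI backend defines
 * its own ops table in wmi.c or wmi-tlv.c (e.g. wmi_10_4_ops, wmi_tlv_ops)
 * and ath10k_wmi_attach() selects one based on the firmware's WMI ABI.
 * Roughly, with made-up op names for brevity:
 *
 *	static const struct wmi_ops wmi_10_4_ops = {
 *		.rx			= ath10k_wmi_10_4_op_rx,
 *		.pull_fw_stats		= ath10k_wmi_10_4_op_pull_fw_stats,
 *		.gen_init		= ath10k_wmi_10_4_op_gen_init,
 *		...
 *	};
 *
 * Ops that a given firmware ABI does not support are simply left NULL.
 */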

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}
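/*
 * Most of the command wrappers below follow the same shape, so only the
 * gen_* op and the cmd_id differ between them (a rough sketch, not a real
 * wrapper; "foo" is hypothetical):
 *
 *	static inline int
 *	ath10k_wmi_foo(struct ath10k *ar, ...)
 *	{
 *		struct sk_buff *skb;
 *
 *		if (!ar->wmi.ops->gen_foo)
 *			return -EOPNOTSUPP;
 *
 *		skb = ar->wmi.ops->gen_foo(ar, ...);
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *
 *		return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 *	}
 *
 * i.e. the gen_* op builds the command skb for the active WMI ABI and
 * ath10k_wmi_cmd_send() queues it towards the firmware.
 */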

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		/* The nowait variant does not free the command skb on
		 * failure, so release it here.
		 */
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
			  struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
			  const struct wmi_bb_timing_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bb_timing)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bb_timing(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->set_bb_timing_cmdid);
}

#endif