/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/*
 * Table of WMI operations for one firmware ABI.  The active table is
 * reached through ar->wmi.ops; an op left NULL means "not supported by
 * this interface" and makes the corresponding inline wrapper below
 * return -EOPNOTSUPP.
 *
 * Naming pattern (as used by the wrappers in this file):
 *   pull_*  - parse a received WMI event skb into a host arg struct
 *   gen_*   - build a command skb, later handed to ath10k_wmi_cmd_send()
 *   map_*   - translate a firmware service bitmap into host service bits
 */
struct wmi_ops {
	/* Deliver a raw WMI event skb to the interface's event dispatcher. */
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	/* Event parsers: unpack @skb into the caller-provided @arg. */
	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	/* Unlike the other pull ops this reads from a raw buffer, not an
	 * skb: @phyerr_buf points into the payload and @left_len bounds it.
	 */
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	/* Command generators: each returns a ready-to-send skb or an
	 * ERR_PTR-encoded error.
	 */
	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	/* @restart selects between the start and restart request commands. */
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	/* Formats @fw_stats as text into @buf (debugfs-style output). */
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);

};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

/*
 * Thin dispatch wrappers.  Each checks that the active implementation
 * provides the op (returning -EOPNOTSUPP when it does not) and then
 * forwards its arguments unchanged.
 */

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	/* rx is mandatory for every implementation; a missing op is a
	 * driver bug, hence the one-time warning.
	 */
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;
	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int 376 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, 377 struct ath10k_fw_stats *stats) 378 { 379 if (!ar->wmi.ops->pull_fw_stats) 380 return -EOPNOTSUPP; 381 382 return ar->wmi.ops->pull_fw_stats(ar, skb, stats); 383 } 384 385 static inline int 386 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb, 387 struct wmi_roam_ev_arg *arg) 388 { 389 if (!ar->wmi.ops->pull_roam_ev) 390 return -EOPNOTSUPP; 391 392 return ar->wmi.ops->pull_roam_ev(ar, skb, arg); 393 } 394 395 static inline int 396 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb, 397 struct wmi_wow_ev_arg *arg) 398 { 399 if (!ar->wmi.ops->pull_wow_event) 400 return -EOPNOTSUPP; 401 402 return ar->wmi.ops->pull_wow_event(ar, skb, arg); 403 } 404 405 static inline int 406 ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb, 407 struct wmi_echo_ev_arg *arg) 408 { 409 if (!ar->wmi.ops->pull_echo_ev) 410 return -EOPNOTSUPP; 411 412 return ar->wmi.ops->pull_echo_ev(ar, skb, arg); 413 } 414 415 static inline int 416 ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb, 417 struct wmi_dfs_status_ev_arg *arg) 418 { 419 if (!ar->wmi.ops->pull_dfs_status_ev) 420 return -EOPNOTSUPP; 421 422 return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg); 423 } 424 425 static inline enum wmi_txbf_conf 426 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) 427 { 428 if (!ar->wmi.ops->get_txbf_conf_scheme) 429 return WMI_TXBF_CONF_UNSUPPORTED; 430 431 return ar->wmi.ops->get_txbf_conf_scheme(ar); 432 } 433 434 static inline int 435 ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, 436 dma_addr_t paddr) 437 { 438 struct sk_buff *skb; 439 int ret; 440 441 if (!ar->wmi.ops->gen_mgmt_tx_send) 442 return -EOPNOTSUPP; 443 444 skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr); 445 if (IS_ERR(skb)) 446 return PTR_ERR(skb); 447 448 ret = ath10k_wmi_cmd_send(ar, skb, 449 ar->wmi.cmd->mgmt_tx_send_cmdid); 450 if (ret) 451 return ret; 452 453 
return 0; 454 } 455 456 static inline int 457 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) 458 { 459 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); 460 struct sk_buff *skb; 461 int ret; 462 463 if (!ar->wmi.ops->gen_mgmt_tx) 464 return -EOPNOTSUPP; 465 466 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu); 467 if (IS_ERR(skb)) 468 return PTR_ERR(skb); 469 470 ret = ath10k_wmi_cmd_send(ar, skb, 471 ar->wmi.cmd->mgmt_tx_cmdid); 472 if (ret) 473 return ret; 474 475 /* FIXME There's no ACK event for Management Tx. This probably 476 * shouldn't be called here either. 477 */ 478 info->flags |= IEEE80211_TX_STAT_ACK; 479 ieee80211_tx_status_irqsafe(ar->hw, msdu); 480 481 return 0; 482 } 483 484 static inline int 485 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, 486 u16 ctl2g, u16 ctl5g, 487 enum wmi_dfs_region dfs_reg) 488 { 489 struct sk_buff *skb; 490 491 if (!ar->wmi.ops->gen_pdev_set_rd) 492 return -EOPNOTSUPP; 493 494 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g, 495 dfs_reg); 496 if (IS_ERR(skb)) 497 return PTR_ERR(skb); 498 499 return ath10k_wmi_cmd_send(ar, skb, 500 ar->wmi.cmd->pdev_set_regdomain_cmdid); 501 } 502 503 static inline int 504 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) 505 { 506 struct sk_buff *skb; 507 508 if (!ar->wmi.ops->gen_pdev_suspend) 509 return -EOPNOTSUPP; 510 511 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt); 512 if (IS_ERR(skb)) 513 return PTR_ERR(skb); 514 515 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid); 516 } 517 518 static inline int 519 ath10k_wmi_pdev_resume_target(struct ath10k *ar) 520 { 521 struct sk_buff *skb; 522 523 if (!ar->wmi.ops->gen_pdev_resume) 524 return -EOPNOTSUPP; 525 526 skb = ar->wmi.ops->gen_pdev_resume(ar); 527 if (IS_ERR(skb)) 528 return PTR_ERR(skb); 529 530 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid); 531 } 532 533 static inline int 534 
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) 535 { 536 struct sk_buff *skb; 537 538 if (!ar->wmi.ops->gen_pdev_set_param) 539 return -EOPNOTSUPP; 540 541 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value); 542 if (IS_ERR(skb)) 543 return PTR_ERR(skb); 544 545 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); 546 } 547 548 static inline int 549 ath10k_wmi_cmd_init(struct ath10k *ar) 550 { 551 struct sk_buff *skb; 552 553 if (!ar->wmi.ops->gen_init) 554 return -EOPNOTSUPP; 555 556 skb = ar->wmi.ops->gen_init(ar); 557 if (IS_ERR(skb)) 558 return PTR_ERR(skb); 559 560 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid); 561 } 562 563 static inline int 564 ath10k_wmi_start_scan(struct ath10k *ar, 565 const struct wmi_start_scan_arg *arg) 566 { 567 struct sk_buff *skb; 568 569 if (!ar->wmi.ops->gen_start_scan) 570 return -EOPNOTSUPP; 571 572 skb = ar->wmi.ops->gen_start_scan(ar, arg); 573 if (IS_ERR(skb)) 574 return PTR_ERR(skb); 575 576 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); 577 } 578 579 static inline int 580 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) 581 { 582 struct sk_buff *skb; 583 584 if (!ar->wmi.ops->gen_stop_scan) 585 return -EOPNOTSUPP; 586 587 skb = ar->wmi.ops->gen_stop_scan(ar, arg); 588 if (IS_ERR(skb)) 589 return PTR_ERR(skb); 590 591 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); 592 } 593 594 static inline int 595 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, 596 enum wmi_vdev_type type, 597 enum wmi_vdev_subtype subtype, 598 const u8 macaddr[ETH_ALEN]) 599 { 600 struct sk_buff *skb; 601 602 if (!ar->wmi.ops->gen_vdev_create) 603 return -EOPNOTSUPP; 604 605 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr); 606 if (IS_ERR(skb)) 607 return PTR_ERR(skb); 608 609 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid); 610 } 611 612 static inline int 613 
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) 614 { 615 struct sk_buff *skb; 616 617 if (!ar->wmi.ops->gen_vdev_delete) 618 return -EOPNOTSUPP; 619 620 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id); 621 if (IS_ERR(skb)) 622 return PTR_ERR(skb); 623 624 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); 625 } 626 627 static inline int 628 ath10k_wmi_vdev_start(struct ath10k *ar, 629 const struct wmi_vdev_start_request_arg *arg) 630 { 631 struct sk_buff *skb; 632 633 if (!ar->wmi.ops->gen_vdev_start) 634 return -EOPNOTSUPP; 635 636 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false); 637 if (IS_ERR(skb)) 638 return PTR_ERR(skb); 639 640 return ath10k_wmi_cmd_send(ar, skb, 641 ar->wmi.cmd->vdev_start_request_cmdid); 642 } 643 644 static inline int 645 ath10k_wmi_vdev_restart(struct ath10k *ar, 646 const struct wmi_vdev_start_request_arg *arg) 647 { 648 struct sk_buff *skb; 649 650 if (!ar->wmi.ops->gen_vdev_start) 651 return -EOPNOTSUPP; 652 653 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true); 654 if (IS_ERR(skb)) 655 return PTR_ERR(skb); 656 657 return ath10k_wmi_cmd_send(ar, skb, 658 ar->wmi.cmd->vdev_restart_request_cmdid); 659 } 660 661 static inline int 662 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) 663 { 664 struct sk_buff *skb; 665 666 if (!ar->wmi.ops->gen_vdev_stop) 667 return -EOPNOTSUPP; 668 669 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id); 670 if (IS_ERR(skb)) 671 return PTR_ERR(skb); 672 673 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); 674 } 675 676 static inline int 677 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) 678 { 679 struct sk_buff *skb; 680 681 if (!ar->wmi.ops->gen_vdev_up) 682 return -EOPNOTSUPP; 683 684 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid); 685 if (IS_ERR(skb)) 686 return PTR_ERR(skb); 687 688 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid); 689 } 690 691 static inline int 692 ath10k_wmi_vdev_down(struct ath10k *ar, 
u32 vdev_id) 693 { 694 struct sk_buff *skb; 695 696 if (!ar->wmi.ops->gen_vdev_down) 697 return -EOPNOTSUPP; 698 699 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id); 700 if (IS_ERR(skb)) 701 return PTR_ERR(skb); 702 703 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); 704 } 705 706 static inline int 707 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id, 708 u32 param_value) 709 { 710 struct sk_buff *skb; 711 712 if (!ar->wmi.ops->gen_vdev_set_param) 713 return -EOPNOTSUPP; 714 715 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id, 716 param_value); 717 if (IS_ERR(skb)) 718 return PTR_ERR(skb); 719 720 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); 721 } 722 723 static inline int 724 ath10k_wmi_vdev_install_key(struct ath10k *ar, 725 const struct wmi_vdev_install_key_arg *arg) 726 { 727 struct sk_buff *skb; 728 729 if (!ar->wmi.ops->gen_vdev_install_key) 730 return -EOPNOTSUPP; 731 732 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg); 733 if (IS_ERR(skb)) 734 return PTR_ERR(skb); 735 736 return ath10k_wmi_cmd_send(ar, skb, 737 ar->wmi.cmd->vdev_install_key_cmdid); 738 } 739 740 static inline int 741 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, 742 const struct wmi_vdev_spectral_conf_arg *arg) 743 { 744 struct sk_buff *skb; 745 u32 cmd_id; 746 747 if (!ar->wmi.ops->gen_vdev_spectral_conf) 748 return -EOPNOTSUPP; 749 750 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); 751 if (IS_ERR(skb)) 752 return PTR_ERR(skb); 753 754 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; 755 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 756 } 757 758 static inline int 759 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, 760 u32 enable) 761 { 762 struct sk_buff *skb; 763 u32 cmd_id; 764 765 if (!ar->wmi.ops->gen_vdev_spectral_enable) 766 return -EOPNOTSUPP; 767 768 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, 769 enable); 770 if (IS_ERR(skb)) 771 return 
PTR_ERR(skb); 772 773 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; 774 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 775 } 776 777 static inline int 778 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, 779 const u8 peer_addr[ETH_ALEN], 780 const struct wmi_sta_uapsd_auto_trig_arg *args, 781 u32 num_ac) 782 { 783 struct sk_buff *skb; 784 u32 cmd_id; 785 786 if (!ar->wmi.ops->gen_vdev_sta_uapsd) 787 return -EOPNOTSUPP; 788 789 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, 790 num_ac); 791 if (IS_ERR(skb)) 792 return PTR_ERR(skb); 793 794 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid; 795 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 796 } 797 798 static inline int 799 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, 800 const struct wmi_wmm_params_all_arg *arg) 801 { 802 struct sk_buff *skb; 803 u32 cmd_id; 804 805 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); 806 if (IS_ERR(skb)) 807 return PTR_ERR(skb); 808 809 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid; 810 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 811 } 812 813 static inline int 814 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 815 const u8 peer_addr[ETH_ALEN], 816 enum wmi_peer_type peer_type) 817 { 818 struct sk_buff *skb; 819 820 if (!ar->wmi.ops->gen_peer_create) 821 return -EOPNOTSUPP; 822 823 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type); 824 if (IS_ERR(skb)) 825 return PTR_ERR(skb); 826 827 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 828 } 829 830 static inline int 831 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 832 const u8 peer_addr[ETH_ALEN]) 833 { 834 struct sk_buff *skb; 835 836 if (!ar->wmi.ops->gen_peer_delete) 837 return -EOPNOTSUPP; 838 839 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr); 840 if (IS_ERR(skb)) 841 return PTR_ERR(skb); 842 843 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 844 } 845 846 static inline int 847 
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 848 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 849 { 850 struct sk_buff *skb; 851 852 if (!ar->wmi.ops->gen_peer_flush) 853 return -EOPNOTSUPP; 854 855 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap); 856 if (IS_ERR(skb)) 857 return PTR_ERR(skb); 858 859 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 860 } 861 862 static inline int 863 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr, 864 enum wmi_peer_param param_id, u32 param_value) 865 { 866 struct sk_buff *skb; 867 868 if (!ar->wmi.ops->gen_peer_set_param) 869 return -EOPNOTSUPP; 870 871 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id, 872 param_value); 873 if (IS_ERR(skb)) 874 return PTR_ERR(skb); 875 876 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid); 877 } 878 879 static inline int 880 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, 881 enum wmi_sta_ps_mode psmode) 882 { 883 struct sk_buff *skb; 884 885 if (!ar->wmi.ops->gen_set_psmode) 886 return -EOPNOTSUPP; 887 888 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode); 889 if (IS_ERR(skb)) 890 return PTR_ERR(skb); 891 892 return ath10k_wmi_cmd_send(ar, skb, 893 ar->wmi.cmd->sta_powersave_mode_cmdid); 894 } 895 896 static inline int 897 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, 898 enum wmi_sta_powersave_param param_id, u32 value) 899 { 900 struct sk_buff *skb; 901 902 if (!ar->wmi.ops->gen_set_sta_ps) 903 return -EOPNOTSUPP; 904 905 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value); 906 if (IS_ERR(skb)) 907 return PTR_ERR(skb); 908 909 return ath10k_wmi_cmd_send(ar, skb, 910 ar->wmi.cmd->sta_powersave_param_cmdid); 911 } 912 913 static inline int 914 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, 915 enum wmi_ap_ps_peer_param param_id, u32 value) 916 { 917 struct sk_buff *skb; 918 919 if 
(!ar->wmi.ops->gen_set_ap_ps) 920 return -EOPNOTSUPP; 921 922 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value); 923 if (IS_ERR(skb)) 924 return PTR_ERR(skb); 925 926 return ath10k_wmi_cmd_send(ar, skb, 927 ar->wmi.cmd->ap_ps_peer_param_cmdid); 928 } 929 930 static inline int 931 ath10k_wmi_scan_chan_list(struct ath10k *ar, 932 const struct wmi_scan_chan_list_arg *arg) 933 { 934 struct sk_buff *skb; 935 936 if (!ar->wmi.ops->gen_scan_chan_list) 937 return -EOPNOTSUPP; 938 939 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg); 940 if (IS_ERR(skb)) 941 return PTR_ERR(skb); 942 943 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); 944 } 945 946 static inline int 947 ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN]) 948 { 949 struct sk_buff *skb; 950 u32 prob_req_oui; 951 952 prob_req_oui = (((u32)mac_addr[0]) << 16) | 953 (((u32)mac_addr[1]) << 8) | mac_addr[2]; 954 955 if (!ar->wmi.ops->gen_scan_prob_req_oui) 956 return -EOPNOTSUPP; 957 958 skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui); 959 if (IS_ERR(skb)) 960 return PTR_ERR(skb); 961 962 return ath10k_wmi_cmd_send(ar, skb, 963 ar->wmi.cmd->scan_prob_req_oui_cmdid); 964 } 965 966 static inline int 967 ath10k_wmi_peer_assoc(struct ath10k *ar, 968 const struct wmi_peer_assoc_complete_arg *arg) 969 { 970 struct sk_buff *skb; 971 972 if (!ar->wmi.ops->gen_peer_assoc) 973 return -EOPNOTSUPP; 974 975 skb = ar->wmi.ops->gen_peer_assoc(ar, arg); 976 if (IS_ERR(skb)) 977 return PTR_ERR(skb); 978 979 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid); 980 } 981 982 static inline int 983 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id, 984 const void *bcn, size_t bcn_len, 985 u32 bcn_paddr, bool dtim_zero, 986 bool deliver_cab) 987 { 988 struct sk_buff *skb; 989 int ret; 990 991 if (!ar->wmi.ops->gen_beacon_dma) 992 return -EOPNOTSUPP; 993 994 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr, 995 
dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		/* send failed: the buffer was not consumed, release it here */
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

/* Build and send WMI_PDEV_SET_WMM_PARAMS; -EOPNOTSUPP if the op is absent. */
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

/* Request firmware statistics selected by @stats_mask. */
static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

/* Ask firmware to crash itself (for testing) after @delay_ms. */
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

/* Configure firmware debug logging (module mask and verbosity). */
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

/* Enable packet log capture with the given event @filter. */
static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

/* Disable packet log capture. */
static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

/* Configure periodic quiet time on the pdev. */
static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

/* Request the chip temperature reading from firmware. */
static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

/* Clear block-ack response state for peer @mac on @vdev_id. */
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

/* Send an ADDBA request for @tid to peer @mac with window @buf_size. */
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

/* Set the ADDBA response @status for @tid of peer @mac. */
static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

/* Tear down the block-ack session for @tid with peer @mac. */
static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

/* Upload a beacon template (plus probe-response caps/IEs) for @vdev_id. */
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

/* Upload a probe-response template for @vdev_id. */
static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

/* Set the P2P GO beacon information element for @vdev_id. */
static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

/* Configure station keepalive behaviour from @arg. */
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Enable Wake-on-WLAN mode in firmware. */
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Enable or disable a single WoW wakeup @event on @vdev_id. */
static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Tell firmware the host has woken up from WoW sleep. */
static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Install a WoW wake pattern (@pattern/@mask pair) on @vdev_id. */
static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Remove a previously installed WoW wake pattern. */
static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* Update the firmware TDLS @state for @vdev_id. */
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

/* Push TDLS peer capabilities and channel info to firmware. */
static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

/* Enable or disable adaptive QCS in firmware. */
static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

/* Request the transmit-power control configuration from firmware. */
static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

/*
 * Format @fw_stats into @buf via the per-ABI fill op. Purely local --
 * no WMI command is sent. Returns 0 on success, -EOPNOTSUPP if unsupported.
 */
static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

/* Enable adaptive CCA with the given detection level/margin. */
static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

/* Send extended resource configuration (host platform type + features). */
static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

/*
 * Translate @subtype to the firmware ABI's vdev subtype value.
 * No command is sent; result comes straight from the op.
 */
static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

/* Request BSS channel survey information of the given @type. */
static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

/* Send a WMI echo command carrying @value. */
static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

/* Request the transmit-power control table from firmware. */
static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

/* Report a detected radar described by @arg to firmware. */
static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

#endif /* _WMI_OPS_H_ */