/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/*
 * Table of WMI operations. Each WMI interface flavour (e.g. wmi.c,
 * wmi-tlv.c) fills in the callbacks it supports: pull_* ops parse
 * received events into host structures, gen_* ops build command
 * buffers, and the remaining ops answer capability queries. Callbacks
 * left NULL are reported as unsupported by the inline wrappers below.
 */
struct wmi_ops {
        void (*rx)(struct ath10k *ar, struct sk_buff *skb);
        void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
        void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

        int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_scan_ev_arg *arg);
        int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_mgmt_rx_ev_arg *arg);
        int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_ch_info_ev_arg *arg);
        int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_vdev_start_ev_arg *arg);
        int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_peer_kick_ev_arg *arg);
        int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_swba_ev_arg *arg);
        int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_phyerr_hdr_arg *arg);
        int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
                int left_len, struct wmi_phyerr_ev_arg *arg);
        int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_svc_rdy_ev_arg *arg);
        int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_rdy_ev_arg *arg);
        int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
                struct ath10k_fw_stats *stats);
        int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_wow_ev_arg *arg);
        int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_echo_ev_arg *arg);
        int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
                struct wmi_svc_avail_ev_arg *arg);

        enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
                u16 rd5g, u16 ctl2g, u16 ctl5g,
                enum wmi_dfs_region dfs_reg);
        struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
                u32 value);
        struct sk_buff *(*gen_init)(struct ath10k *ar);
        struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
                const struct wmi_start_scan_arg *arg);
        struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
                const struct wmi_stop_scan_arg *arg);
        struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
                enum wmi_vdev_type type,
                enum wmi_vdev_subtype subtype,
                const u8 macaddr[ETH_ALEN]);
        struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
                const struct wmi_vdev_start_request_arg *arg,
                bool restart);
        struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
                const u8 *bssid);
        struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
        struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
                u32 param_id, u32 param_value);
        struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
                const struct wmi_vdev_install_key_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
                const struct wmi_vdev_spectral_conf_arg *arg);
        struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
                u32 trigger, u32 enable);
        struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
                const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                enum wmi_peer_type peer_type);
        struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                u32 tid_bitmap);
        struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
                const u8 *peer_addr,
                enum wmi_peer_param param_id,
                u32 param_value);
        struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
                const struct wmi_peer_assoc_complete_arg *arg);
        struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_ps_mode psmode);
        struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
                enum wmi_sta_powersave_param param_id,
                u32 value);
        struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac,
                enum wmi_ap_ps_peer_param param_id,
                u32 value);
        struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
                const struct wmi_scan_chan_list_arg *arg);
        struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
                u32 prob_req_oui);
        struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
                const void *bcn, size_t bcn_len,
                u32 bcn_paddr, bool dtim_zero,
                bool deliver_cab);
        struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
                const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
        struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
                enum wmi_force_fw_hang_type type,
                u32 delay_ms);
        struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
        struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
                struct sk_buff *skb,
                dma_addr_t paddr);
        struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
                u32 log_level);
        struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
        struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
        struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
                u32 period, u32 duration,
                u32 next_offset,
                u32 enabled);
        struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
        struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac);
        struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac, u32 tid, u32 buf_size);
        struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac, u32 tid,
                u32 status);
        struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
                const u8 *mac, u32 tid, u32 initiator,
                u32 reason);
        struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
                u32 tim_ie_offset, struct sk_buff *bcn,
                u32 prb_caps, u32 prb_erp,
                void *prb_ies, size_t prb_ies_len);
        struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
                struct sk_buff *bcn);
        struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
                const u8 *p2p_ie);
        struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
                const u8 peer_addr[ETH_ALEN],
                const struct wmi_sta_uapsd_auto_trig_arg *args,
                u32 num_ac);
        struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
                const struct wmi_sta_keepalive_arg *arg);
        struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
                enum wmi_wow_wakeup_event event,
                u32 enable);
        struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
        struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
                u32 pattern_id,
                const u8 *pattern,
                const u8 *mask,
                int pattern_len,
                int pattern_offset);
        struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
                u32 pattern_id);
        struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
                u32 vdev_id,
                enum wmi_tdls_state state);
        struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
                const struct wmi_tdls_peer_update_cmd_arg *arg,
                const struct wmi_tdls_peer_capab_arg *cap,
                const struct wmi_channel_arg *chan);
        struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
        struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
                u32 param);
        void (*fw_stats_fill)(struct ath10k *ar,
                struct ath10k_fw_stats *fw_stats,
                char *buf);
        struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
                u8 enable,
                u32 detect_level,
                u32 detect_margin);
        struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
                enum wmi_host_platform_type type,
                u32 fw_feature_bitmap);
        int (*get_vdev_subtype)(struct ath10k *ar,
                enum wmi_vdev_subtype subtype);
        struct sk_buff *(*gen_pdev_bss_chan_info_req)(struct ath10k *ar,
                enum wmi_bss_survey_req_type type);
        struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
        struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
                u32 param);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
        if (WARN_ON_ONCE(!ar->wmi.ops->rx))
                return -EOPNOTSUPP;

        ar->wmi.ops->rx(ar, skb);
        return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
        size_t len)
{
        if (!ar->wmi.ops->map_svc)
                return -EOPNOTSUPP;

        ar->wmi.ops->map_svc(in, out, len);
        return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
        size_t len)
{
        if (!ar->wmi.ops->map_svc_ext)
                return -EOPNOTSUPP;

        ar->wmi.ops->map_svc_ext(in, out, len);
        return 0;
}

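/*
 * Event parsing wrappers. Each helper below checks whether the active
 * WMI implementation provides the corresponding pull_* op and, if so,
 * delegates to it; otherwise -EOPNOTSUPP is returned so the caller can
 * treat the event as unsupported by this firmware interface.
 */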
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_scan_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_scan)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_mgmt_rx_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_mgmt_rx)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_ch_info_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_ch_info)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_vdev_start_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_vdev_start)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_peer_kick_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_peer_kick)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_swba_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_swba)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_phyerr_hdr_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr_hdr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
        int left_len, struct wmi_phyerr_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_phyerr)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_svc_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_svc_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_rdy_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_rdy)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_svc_avail_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_svc_avail)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
        struct ath10k_fw_stats *stats)
{
        if (!ar->wmi.ops->pull_fw_stats)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_roam_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_roam_ev)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_wow_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_wow_event)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
        struct wmi_echo_ev_arg *arg)
{
        if (!ar->wmi.ops->pull_echo_ev)
                return -EOPNOTSUPP;

        return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
        if (!ar->wmi.ops->get_txbf_conf_scheme)
                return WMI_TXBF_CONF_UNSUPPORTED;

        return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

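/*
 * Command wrappers. The helpers below share a common pattern: build a
 * command buffer via the gen_* op of the active WMI implementation and
 * hand it to ath10k_wmi_cmd_send() together with the matching command
 * id from ar->wmi.cmd. A missing op means the command is not supported
 * by this firmware interface and -EOPNOTSUPP is returned.
 */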
static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
        dma_addr_t paddr)
{
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->mgmt_tx_send_cmdid);
        if (ret)
                return ret;

        return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_mgmt_tx)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->mgmt_tx_cmdid);
        if (ret)
                return ret;

        /* FIXME There's no ACK event for Management Tx. This probably
         * shouldn't be called here either.
         */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(ar->hw, msdu);

        return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
        u16 ctl2g, u16 ctl5g,
        enum wmi_dfs_region dfs_reg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_rd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
                dfs_reg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_suspend)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_resume)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_resume(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_init)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_init(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
        const struct wmi_start_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_start_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_start_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_stop_scan)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_stop_scan(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
        enum wmi_vdev_type type,
        enum wmi_vdev_subtype subtype,
        const u8 macaddr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
        const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
        const struct wmi_vdev_start_request_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_start)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_stop)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_up)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_down)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
        u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
                param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
        const struct wmi_vdev_install_key_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_vdev_install_key)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
        const struct wmi_vdev_spectral_conf_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_spectral_conf)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
        u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_spectral_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
                enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
        const u8 peer_addr[ETH_ALEN],
        const struct wmi_sta_uapsd_auto_trig_arg *args,
        u32 num_ac)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_sta_uapsd)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
                num_ac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
        const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_vdev_wmm_conf)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
        const u8 peer_addr[ETH_ALEN],
        enum wmi_peer_type peer_type)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_create)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
        const u8 peer_addr[ETH_ALEN])
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_delete)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
        const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_flush)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
        enum wmi_peer_param param_id, u32 param_value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_set_param)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
                param_value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
        enum wmi_sta_ps_mode psmode)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_psmode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
        enum wmi_sta_powersave_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_sta_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        enum wmi_ap_ps_peer_param param_id, u32 value)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_set_ap_ps)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
        const struct wmi_scan_chan_list_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_scan_chan_list)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
        struct sk_buff *skb;
        u32 prob_req_oui;

        prob_req_oui = (((u32)mac_addr[0]) << 16) |
                (((u32)mac_addr[1]) << 8) | mac_addr[2];

        if (!ar->wmi.ops->gen_scan_prob_req_oui)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
        const struct wmi_peer_assoc_complete_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_peer_assoc)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
        const void *bcn, size_t bcn_len,
        u32 bcn_paddr, bool dtim_zero,
        bool deliver_cab)
{
        struct sk_buff *skb;
        int ret;

        if (!ar->wmi.ops->gen_beacon_dma)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
                dtim_zero, deliver_cab);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        ret = ath10k_wmi_cmd_send_nowait(ar, skb,
                ar->wmi.cmd->pdev_send_bcn_cmdid);
        if (ret) {
                dev_kfree_skb(skb);
                return ret;
        }

        return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
        const struct wmi_wmm_params_all_arg *arg)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_wmm)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_request_stats)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
        enum wmi_force_fw_hang_type type, u32 delay_ms)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_force_fw_hang)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_dbglog_cfg)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pktlog_disable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pktlog_disable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
        u32 next_offset, u32 enabled)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
                next_offset, enabled);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_temperature)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_clear_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        u32 tid, u32 buf_size)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        u32 tid, u32 status)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_addba_set_resp)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        u32 tid, u32 initiator, u32 reason)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_delba_send)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
                reason);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
        struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
        void *prb_ies, size_t prb_ies_len)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_bcn_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
                prb_caps, prb_erp, prb_ies,
                prb_ies_len);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_prb_tmpl)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
        const struct wmi_sta_keepalive_arg *arg)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_sta_keepalive)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_enable)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_enable(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_enable_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
        enum wmi_wow_wakeup_event event,
        u32 enable)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_wakeup_event)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
        const u8 *pattern, const u8 *mask,
        int pattern_len, int pattern_offset)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_add_pattern)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
                pattern, mask, pattern_len,
                pattern_offset);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
        struct sk_buff *skb;
        u32 cmd_id;

        if (!ar->wmi.ops->gen_wow_del_pattern)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
        enum wmi_tdls_state state)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_update_fw_tdls_state)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
        const struct wmi_tdls_peer_update_cmd_arg *arg,
        const struct wmi_tdls_peer_capab_arg *cap,
        const struct wmi_channel_arg *chan)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_tdls_peer_update)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_adaptive_qcs)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_tpc_config)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
        char *buf)
{
        if (!ar->wmi.ops->fw_stats_fill)
                return -EOPNOTSUPP;

        ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
        return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
        u32 detect_level, u32 detect_margin)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
                detect_level,
                detect_margin);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
        enum wmi_host_platform_type type,
        u32 fw_feature_bitmap)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->ext_resource_config)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->ext_resource_config(ar, type,
                fw_feature_bitmap);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
        if (!ar->wmi.ops->get_vdev_subtype)
                return -EOPNOTSUPP;

        return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
        enum wmi_bss_survey_req_type type)
{
        struct ath10k_wmi *wmi = &ar->wmi;
        struct sk_buff *skb;

        if (!wmi->ops->gen_pdev_bss_chan_info_req)
                return -EOPNOTSUPP;

        skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
        struct ath10k_wmi *wmi = &ar->wmi;
        struct sk_buff *skb;

        if (!wmi->ops->gen_echo)
                return -EOPNOTSUPP;

        skb = wmi->ops->gen_echo(ar, value);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
        struct sk_buff *skb;

        if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
                return -EOPNOTSUPP;

        skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        return ath10k_wmi_cmd_send(ar, skb,
                ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

#endif