/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);

};
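/*
 * Each firmware ABI variant provides its own wmi_ops table and the core
 * dispatches through ar->wmi.ops. Roughly (illustrative sketch only, the
 * names below are hypothetical):
 *
 *	static const struct wmi_ops wmi_example_ops = {
 *		.rx = wmi_example_op_rx,
 *		.gen_init = wmi_example_op_gen_init,
 *	};
 *
 *	ar->wmi.ops = &wmi_example_ops;
 */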
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
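/*
 * The inline wrappers below share a common shape: check that the
 * firmware-specific op is implemented (returning -EOPNOTSUPP when it is
 * not), let the op parse the event or build the command skb, and for
 * commands hand the skb to ath10k_wmi_cmd_send() with the matching id
 * from ar->wmi.cmd.
 */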
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

#endif