/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/* WMI command/event handlers. An implementation may leave any handler
 * unset (NULL); the inline wrappers below then return -EOPNOTSUPP.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);

	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)
			(struct ath10k *ar,
			 const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
			(struct ath10k *ar,
			 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);

};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;
	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}

static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

#endif