/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/*
 * Per-firmware WMI backend operations. Each WMI ABI variant provides its
 * own implementation of this table: the pull_* ops parse received event
 * buffers into host argument structs, the gen_* ops build command skbs for
 * transmission, and the remaining ops handle dispatch and conversions.
 * A NULL entry means the command or event is not supported by that
 * backend; the inline wrappers below then return -EOPNOTSUPP.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)
					(struct ath10k *ar,
					 enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};

int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

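/*
 * The inline wrappers below all follow the same pattern: check that the
 * backend implements the op (return -EOPNOTSUPP otherwise), let the gen_*
 * op build the command skb (ERR_PTR() on failure), then queue it with
 * ath10k_wmi_cmd_send() using the backend-specific command id from
 * ar->wmi.cmd. Roughly, as an illustrative sketch only (gen_foo and
 * foo_cmdid are placeholder names, not real ops):
 *
 *	skb = ar->wmi.ops->gen_foo(ar, arg);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 */
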
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;
	u32 mgmt_tx_cmdid;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
		     ar->running_fw->fw_file.fw_features))
		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
	else
		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;

	ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	/* Guard against a missing op, matching the other wrappers. */
	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}

static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

#endif