/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_

struct ath10k;
struct sk_buff;

/* Per-firmware-ABI WMI dispatch table, installed at ar->wmi.ops.
 *
 * Member groups:
 *   rx/map_svc - event delivery and service bitmap decoding
 *   pull_*     - parse a received event skb into a host-order arg struct
 *   gen_*      - build a command skb; the caller owns the returned skb
 *                until it is handed to ath10k_wmi_cmd_send()
 *
 * Any member may be NULL when the firmware ABI does not support the
 * operation; the inline wrappers below translate a missing op into
 * -EOPNOTSUPP (see the wrappers for the few exceptions, e.g.
 * get_txbf_conf_scheme which reports "unsupported" instead).
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
};

/* Transmit a fully built WMI command skb; consumes the skb. */
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);

/* Deliver a received WMI event skb to the ABI-specific event handler.
 * A missing rx op is a driver bug, hence the WARN_ON_ONCE.
 */
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

/* Decode the firmware service bitmap @in into the host bitmap @out. */
static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

/* The pull_* wrappers below parse an event skb into @arg, or return
 * -EOPNOTSUPP when the firmware ABI does not provide the parser.
 */
static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

/* Parse one phyerr record from a raw buffer with @left_len bytes
 * remaining (not an skb - records are iterated within one event).
 */
static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

/* A missing op means "txbf not supported", not an error code. */
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

/* Send a management frame over WMI and immediately report it to
 * mac80211 as ACKed (see FIXME below).
 */
static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}

static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

/* Start and restart share gen_vdev_start; only the restart flag and
 * the command id differ.
 */
static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return
PTR_ERR(skb); 616 617 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid); 618 } 619 620 static inline int 621 ath10k_wmi_vdev_install_key(struct ath10k *ar, 622 const struct wmi_vdev_install_key_arg *arg) 623 { 624 struct sk_buff *skb; 625 626 if (!ar->wmi.ops->gen_vdev_install_key) 627 return -EOPNOTSUPP; 628 629 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg); 630 if (IS_ERR(skb)) 631 return PTR_ERR(skb); 632 633 return ath10k_wmi_cmd_send(ar, skb, 634 ar->wmi.cmd->vdev_install_key_cmdid); 635 } 636 637 static inline int 638 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, 639 const struct wmi_vdev_spectral_conf_arg *arg) 640 { 641 struct sk_buff *skb; 642 u32 cmd_id; 643 644 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg); 645 if (IS_ERR(skb)) 646 return PTR_ERR(skb); 647 648 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; 649 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 650 } 651 652 static inline int 653 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, 654 u32 enable) 655 { 656 struct sk_buff *skb; 657 u32 cmd_id; 658 659 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger, 660 enable); 661 if (IS_ERR(skb)) 662 return PTR_ERR(skb); 663 664 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; 665 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 666 } 667 668 static inline int 669 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, 670 const u8 peer_addr[ETH_ALEN], 671 const struct wmi_sta_uapsd_auto_trig_arg *args, 672 u32 num_ac) 673 { 674 struct sk_buff *skb; 675 u32 cmd_id; 676 677 if (!ar->wmi.ops->gen_vdev_sta_uapsd) 678 return -EOPNOTSUPP; 679 680 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args, 681 num_ac); 682 if (IS_ERR(skb)) 683 return PTR_ERR(skb); 684 685 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid; 686 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 687 } 688 689 static inline int 690 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, 691 const 
struct wmi_wmm_params_all_arg *arg) 692 { 693 struct sk_buff *skb; 694 u32 cmd_id; 695 696 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg); 697 if (IS_ERR(skb)) 698 return PTR_ERR(skb); 699 700 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid; 701 return ath10k_wmi_cmd_send(ar, skb, cmd_id); 702 } 703 704 static inline int 705 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, 706 const u8 peer_addr[ETH_ALEN], 707 enum wmi_peer_type peer_type) 708 { 709 struct sk_buff *skb; 710 711 if (!ar->wmi.ops->gen_peer_create) 712 return -EOPNOTSUPP; 713 714 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type); 715 if (IS_ERR(skb)) 716 return PTR_ERR(skb); 717 718 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); 719 } 720 721 static inline int 722 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, 723 const u8 peer_addr[ETH_ALEN]) 724 { 725 struct sk_buff *skb; 726 727 if (!ar->wmi.ops->gen_peer_delete) 728 return -EOPNOTSUPP; 729 730 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr); 731 if (IS_ERR(skb)) 732 return PTR_ERR(skb); 733 734 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); 735 } 736 737 static inline int 738 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, 739 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 740 { 741 struct sk_buff *skb; 742 743 if (!ar->wmi.ops->gen_peer_flush) 744 return -EOPNOTSUPP; 745 746 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap); 747 if (IS_ERR(skb)) 748 return PTR_ERR(skb); 749 750 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); 751 } 752 753 static inline int 754 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr, 755 enum wmi_peer_param param_id, u32 param_value) 756 { 757 struct sk_buff *skb; 758 759 if (!ar->wmi.ops->gen_peer_set_param) 760 return -EOPNOTSUPP; 761 762 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id, 763 param_value); 764 if 
(IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

/* Queue a beacon (already DMA-mapped at @bcn_paddr) without waiting for
 * tx credits; uses the nowait send path, so the skb must be freed here
 * on failure (the blocking ath10k_wmi_cmd_send() consumes it instead).
 */
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

/* Deliberately crash/hang the firmware after @delay_ms (test facility). */
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

/* The wow_* wrappers configure firmware Wake-on-WLAN behaviour. */
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if
(IS_ERR(skb)) 1295 return PTR_ERR(skb); 1296 1297 return ath10k_wmi_cmd_send(ar, skb, 1298 ar->wmi.cmd->pdev_get_tpc_config_cmdid); 1299 } 1300 1301 static inline int 1302 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, 1303 char *buf) 1304 { 1305 if (!ar->wmi.ops->fw_stats_fill) 1306 return -EOPNOTSUPP; 1307 1308 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf); 1309 return 0; 1310 } 1311 1312 static inline int 1313 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, 1314 u32 detect_level, u32 detect_margin) 1315 { 1316 struct sk_buff *skb; 1317 1318 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca) 1319 return -EOPNOTSUPP; 1320 1321 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable, 1322 detect_level, 1323 detect_margin); 1324 1325 if (IS_ERR(skb)) 1326 return PTR_ERR(skb); 1327 1328 return ath10k_wmi_cmd_send(ar, skb, 1329 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); 1330 } 1331 1332 static inline int 1333 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) 1334 { 1335 if (!ar->wmi.ops->get_vdev_subtype) 1336 return -EOPNOTSUPP; 1337 1338 return ar->wmi.ops->get_vdev_subtype(ar, subtype); 1339 } 1340 1341 #endif 1342