1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2017 Intel Deutschland GmbH 6 */ 7 #include <linux/jiffies.h> 8 #include <net/mac80211.h> 9 10 #include "fw/notif-wait.h" 11 #include "iwl-trans.h" 12 #include "fw-api.h" 13 #include "time-event.h" 14 #include "mvm.h" 15 #include "iwl-io.h" 16 #include "iwl-prph.h" 17 18 /* 19 * For the high priority TE use a time event type that has similar priority to 20 * the FW's action scan priority. 21 */ 22 #define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE 23 #define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC 24 25 void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, 26 struct iwl_mvm_time_event_data *te_data) 27 { 28 lockdep_assert_held(&mvm->time_event_lock); 29 30 if (!te_data || !te_data->vif) 31 return; 32 33 list_del(&te_data->list); 34 35 /* 36 * the list is only used for AUX ROC events so make sure it is always 37 * initialized 38 */ 39 INIT_LIST_HEAD(&te_data->list); 40 41 te_data->running = false; 42 te_data->uid = 0; 43 te_data->id = TE_MAX; 44 te_data->vif = NULL; 45 } 46 47 void iwl_mvm_roc_done_wk(struct work_struct *wk) 48 { 49 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); 50 51 /* 52 * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. 53 * This will cause the TX path to drop offchannel transmissions. 54 * That would also be done by mac80211, but it is racy, in particular 55 * in the case that the time event actually completed in the firmware 56 * (which is handled in iwl_mvm_te_handle_notif). 57 */ 58 clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); 59 clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); 60 61 synchronize_net(); 62 63 /* 64 * Flush the offchannel queue -- this is called when the time 65 * event finishes or is canceled, so that frames queued for it 66 * won't get stuck on the queue and be transmitted in the next 67 * time event. 68 */ 69 70 mutex_lock(&mvm->mutex); 71 if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) { 72 struct iwl_mvm_vif *mvmvif; 73 74 /* 75 * NB: access to this pointer would be racy, but the flush bit 76 * can only be set when we had a P2P-Device VIF, and we have a 77 * flush of this work in iwl_mvm_prepare_mac_removal() so it's 78 * not really racy. 79 */ 80 81 if (!WARN_ON(!mvm->p2p_device_vif)) { 82 mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); 83 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); 84 } 85 } else { 86 /* do the same in case of hot spot 2.0 */ 87 iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); 88 /* In newer version of this command an aux station is added only 89 * in cases of dedicated tx queue and need to be removed in end 90 * of use */ 91 if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, 92 ADD_STA, 0) >= 12) 93 iwl_mvm_rm_aux_sta(mvm); 94 } 95 96 mutex_unlock(&mvm->mutex); 97 } 98 99 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) 100 { 101 /* 102 * Of course, our status bit is just as racy as mac80211, so in 103 * addition, fire off the work struct which will drop all frames 104 * from the hardware queues that made it through the race. First 105 * it will of course synchronize the TX path to make sure that 106 * any *new* TX will be rejected. 107 */ 108 schedule_work(&mvm->roc_done_wk); 109 } 110 111 static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) 112 { 113 struct ieee80211_vif *csa_vif; 114 115 rcu_read_lock(); 116 117 csa_vif = rcu_dereference(mvm->csa_vif); 118 if (!csa_vif || !csa_vif->csa_active) 119 goto out_unlock; 120 121 IWL_DEBUG_TE(mvm, "CSA NOA started\n"); 122 123 /* 124 * CSA NoA is started but we still have beacons to 125 * transmit on the current channel. 126 * So we just do nothing here and the switch 127 * will be performed on the last TBTT. 128 */ 129 if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { 130 IWL_WARN(mvm, "CSA NOA started too early\n"); 131 goto out_unlock; 132 } 133 134 ieee80211_csa_finish(csa_vif); 135 136 rcu_read_unlock(); 137 138 RCU_INIT_POINTER(mvm->csa_vif, NULL); 139 140 return; 141 142 out_unlock: 143 rcu_read_unlock(); 144 } 145 146 static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, 147 struct ieee80211_vif *vif, 148 const char *errmsg) 149 { 150 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 151 152 if (vif->type != NL80211_IFTYPE_STATION) 153 return false; 154 155 if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && 156 vif->bss_conf.dtim_period) 157 return false; 158 if (errmsg) 159 IWL_ERR(mvm, "%s\n", errmsg); 160 161 if (mvmvif->csa_bcn_pending) { 162 struct iwl_mvm_sta *mvmsta; 163 164 rcu_read_lock(); 165 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); 166 if (!WARN_ON(!mvmsta)) 167 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); 168 rcu_read_unlock(); 169 } 170 171 if (vif->bss_conf.assoc) { 172 /* 173 * When not associated, this will be called from 174 * iwl_mvm_event_mlme_callback_ini() 175 */ 176 iwl_dbg_tlv_time_point(&mvm->fwrt, 177 IWL_FW_INI_TIME_POINT_ASSOC_FAILED, 178 NULL); 179 } 180 181 iwl_mvm_connection_loss(mvm, vif, errmsg); 182 return true; 183 } 184 185 static void 186 iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm, 187 struct iwl_mvm_time_event_data *te_data, 188 struct iwl_time_event_notif *notif) 189 { 190 struct ieee80211_vif *vif = te_data->vif; 191 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 192 193 if (!notif->status) 194 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); 195 196 switch (te_data->vif->type) { 197 case NL80211_IFTYPE_AP: 198 if (!notif->status) 199 mvmvif->csa_failed = true; 200 iwl_mvm_csa_noa_start(mvm); 201 break; 202 case NL80211_IFTYPE_STATION: 203 if (!notif->status) { 204 iwl_mvm_connection_loss(mvm, vif, 205 "CSA TE failed to start"); 206 break; 207 } 208 iwl_mvm_csa_client_absent(mvm, te_data->vif); 209 cancel_delayed_work(&mvmvif->csa_work); 210 ieee80211_chswitch_done(te_data->vif, true); 211 break; 212 default: 213 /* should never happen */ 214 WARN_ON_ONCE(1); 215 break; 216 } 217 218 /* we don't need it anymore */ 219 iwl_mvm_te_clear_data(mvm, te_data); 220 } 221 222 static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, 223 struct iwl_time_event_notif *notif, 224 struct iwl_mvm_time_event_data *te_data) 225 { 226 struct iwl_fw_dbg_trigger_tlv *trig; 227 struct iwl_fw_dbg_trigger_time_event *te_trig; 228 int i; 229 230 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, 231 ieee80211_vif_to_wdev(te_data->vif), 232 FW_DBG_TRIGGER_TIME_EVENT); 233 if (!trig) 234 return; 235 236 te_trig = (void *)trig->data; 237 238 for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { 239 u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id); 240 u32 trig_action_bitmap = 241 le32_to_cpu(te_trig->time_events[i].action_bitmap); 242 u32 trig_status_bitmap = 243 le32_to_cpu(te_trig->time_events[i].status_bitmap); 244 245 if (trig_te_id != te_data->id || 246 !(trig_action_bitmap & le32_to_cpu(notif->action)) || 247 !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) 248 continue; 249 250 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 251 "Time event %d Action 0x%x received status: %d", 252 te_data->id, 253 le32_to_cpu(notif->action), 254 le32_to_cpu(notif->status)); 255 break; 256 } 257 } 258 259 static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm) 260 { 261 /* 262 * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the 263 * roc_done_wk is already scheduled or running, so don't schedule it 264 * again to avoid a race where the roc_done_wk clears this bit after 265 * it is set here, affecting the next run of the roc_done_wk. 266 */ 267 if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) 268 iwl_mvm_roc_finished(mvm); 269 } 270 271 /* 272 * Handles a FW notification for an event that is known to the driver. 273 * 274 * @mvm: the mvm component 275 * @te_data: the time event data 276 * @notif: the notification data corresponding the time event data. 277 */ 278 static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, 279 struct iwl_mvm_time_event_data *te_data, 280 struct iwl_time_event_notif *notif) 281 { 282 lockdep_assert_held(&mvm->time_event_lock); 283 284 IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n", 285 le32_to_cpu(notif->unique_id), 286 le32_to_cpu(notif->action)); 287 288 iwl_mvm_te_check_trigger(mvm, notif, te_data); 289 290 /* 291 * The FW sends the start/end time event notifications even for events 292 * that it fails to schedule. This is indicated in the status field of 293 * the notification. This happens in cases that the scheduler cannot 294 * find a schedule that can handle the event (for example requesting a 295 * P2P Device discoveribility, while there are other higher priority 296 * events in the system). 297 */ 298 if (!le32_to_cpu(notif->status)) { 299 const char *msg; 300 301 if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START)) 302 msg = "Time Event start notification failure"; 303 else 304 msg = "Time Event end notification failure"; 305 306 IWL_DEBUG_TE(mvm, "%s\n", msg); 307 308 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) { 309 iwl_mvm_te_clear_data(mvm, te_data); 310 return; 311 } 312 } 313 314 if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) { 315 IWL_DEBUG_TE(mvm, 316 "TE ended - current time %lu, estimated end %lu\n", 317 jiffies, te_data->end_jiffies); 318 319 switch (te_data->vif->type) { 320 case NL80211_IFTYPE_P2P_DEVICE: 321 ieee80211_remain_on_channel_expired(mvm->hw); 322 iwl_mvm_p2p_roc_finished(mvm); 323 break; 324 case NL80211_IFTYPE_STATION: 325 /* 326 * If we are switching channel, don't disconnect 327 * if the time event is already done. Beacons can 328 * be delayed a bit after the switch. 329 */ 330 if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { 331 IWL_DEBUG_TE(mvm, 332 "No beacon heard and the CS time event is over, don't disconnect\n"); 333 break; 334 } 335 336 /* 337 * By now, we should have finished association 338 * and know the dtim period. 339 */ 340 iwl_mvm_te_check_disconnect(mvm, te_data->vif, 341 !te_data->vif->bss_conf.assoc ? 342 "Not associated and the time event is over already..." : 343 "No beacon heard and the time event is over already..."); 344 break; 345 default: 346 break; 347 } 348 349 iwl_mvm_te_clear_data(mvm, te_data); 350 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { 351 te_data->running = true; 352 te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); 353 354 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { 355 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); 356 ieee80211_ready_on_channel(mvm->hw); 357 } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { 358 iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); 359 } 360 } else { 361 IWL_WARN(mvm, "Got TE with unknown action\n"); 362 } 363 } 364 365 /* 366 * Handle A Aux ROC time event 367 */ 368 static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, 369 struct iwl_time_event_notif *notif) 370 { 371 struct iwl_mvm_time_event_data *te_data, *tmp; 372 bool aux_roc_te = false; 373 374 list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) { 375 if (le32_to_cpu(notif->unique_id) == te_data->uid) { 376 aux_roc_te = true; 377 break; 378 } 379 } 380 if (!aux_roc_te) /* Not a Aux ROC time event */ 381 return -EINVAL; 382 383 iwl_mvm_te_check_trigger(mvm, notif, te_data); 384 385 IWL_DEBUG_TE(mvm, 386 "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n", 387 le32_to_cpu(notif->unique_id), 388 le32_to_cpu(notif->action), le32_to_cpu(notif->status)); 389 390 if (!le32_to_cpu(notif->status) || 391 le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { 392 /* End TE, notify mac80211 */ 393 ieee80211_remain_on_channel_expired(mvm->hw); 394 iwl_mvm_roc_finished(mvm); /* flush aux queue */ 395 list_del(&te_data->list); /* remove from list */ 396 te_data->running = false; 397 te_data->vif = NULL; 398 te_data->uid = 0; 399 te_data->id = TE_MAX; 400 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { 401 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); 402 te_data->running = true; 403 ieee80211_ready_on_channel(mvm->hw); /* Start TE */ 404 } else { 405 IWL_DEBUG_TE(mvm, 406 "ERROR: Unknown Aux ROC Time Event (action = %d)\n", 407 le32_to_cpu(notif->action)); 408 return -EINVAL; 409 } 410 411 return 0; 412 } 413 414 /* 415 * The Rx handler for time event notifications 416 */ 417 void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, 418 struct iwl_rx_cmd_buffer *rxb) 419 { 420 struct iwl_rx_packet *pkt = rxb_addr(rxb); 421 struct iwl_time_event_notif *notif = (void *)pkt->data; 422 struct iwl_mvm_time_event_data *te_data, *tmp; 423 424 IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n", 425 le32_to_cpu(notif->unique_id), 426 le32_to_cpu(notif->action)); 427 428 spin_lock_bh(&mvm->time_event_lock); 429 /* This time event is triggered for Aux ROC request */ 430 if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif)) 431 goto unlock; 432 433 list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) { 434 if (le32_to_cpu(notif->unique_id) == te_data->uid) 435 iwl_mvm_te_handle_notif(mvm, te_data, notif); 436 } 437 unlock: 438 spin_unlock_bh(&mvm->time_event_lock); 439 } 440 441 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, 442 struct iwl_rx_packet *pkt, void *data) 443 { 444 struct iwl_mvm *mvm = 445 container_of(notif_wait, struct iwl_mvm, notif_wait); 446 struct iwl_mvm_time_event_data *te_data = data; 447 struct iwl_time_event_notif *resp; 448 int resp_len = iwl_rx_packet_payload_len(pkt); 449 450 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) 451 return true; 452 453 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 454 IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); 455 return true; 456 } 457 458 resp = (void *)pkt->data; 459 460 /* te_data->uid is already set in the TIME_EVENT_CMD response */ 461 if (le32_to_cpu(resp->unique_id) != te_data->uid) 462 return false; 463 464 IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", 465 te_data->uid); 466 if (!resp->status) 467 IWL_ERR(mvm, 468 "TIME_EVENT_NOTIFICATION received but not executed\n"); 469 470 return true; 471 } 472 473 static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, 474 struct iwl_rx_packet *pkt, void *data) 475 { 476 struct iwl_mvm *mvm = 477 container_of(notif_wait, struct iwl_mvm, notif_wait); 478 struct iwl_mvm_time_event_data *te_data = data; 479 struct iwl_time_event_resp *resp; 480 int resp_len = iwl_rx_packet_payload_len(pkt); 481 482 if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD)) 483 return true; 484 485 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 486 IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n"); 487 return true; 488 } 489 490 resp = (void *)pkt->data; 491 492 /* we should never get a response to another TIME_EVENT_CMD here */ 493 if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id)) 494 return false; 495 496 te_data->uid = le32_to_cpu(resp->unique_id); 497 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 498 te_data->uid); 499 return true; 500 } 501 502 static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, 503 struct ieee80211_vif *vif, 504 struct iwl_mvm_time_event_data *te_data, 505 struct iwl_time_event_cmd *te_cmd) 506 { 507 static const u16 time_event_response[] = { TIME_EVENT_CMD }; 508 struct iwl_notification_wait wait_time_event; 509 int ret; 510 511 lockdep_assert_held(&mvm->mutex); 512 513 IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n", 514 le32_to_cpu(te_cmd->duration)); 515 516 spin_lock_bh(&mvm->time_event_lock); 517 if (WARN_ON(te_data->id != TE_MAX)) { 518 spin_unlock_bh(&mvm->time_event_lock); 519 return -EIO; 520 } 521 te_data->vif = vif; 522 te_data->duration = le32_to_cpu(te_cmd->duration); 523 te_data->id = le32_to_cpu(te_cmd->id); 524 list_add_tail(&te_data->list, &mvm->time_event_list); 525 spin_unlock_bh(&mvm->time_event_lock); 526 527 /* 528 * Use a notification wait, which really just processes the 529 * command response and doesn't wait for anything, in order 530 * to be able to process the response and get the UID inside 531 * the RX path. Using CMD_WANT_SKB doesn't work because it 532 * stores the buffer and then wakes up this thread, by which 533 * time another notification (that the time event started) 534 * might already be processed unsuccessfully. 535 */ 536 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 537 time_event_response, 538 ARRAY_SIZE(time_event_response), 539 iwl_mvm_time_event_response, te_data); 540 541 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, 542 sizeof(*te_cmd), te_cmd); 543 if (ret) { 544 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); 545 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 546 goto out_clear_te; 547 } 548 549 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 550 ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 551 /* should never fail */ 552 WARN_ON_ONCE(ret); 553 554 if (ret) { 555 out_clear_te: 556 spin_lock_bh(&mvm->time_event_lock); 557 iwl_mvm_te_clear_data(mvm, te_data); 558 spin_unlock_bh(&mvm->time_event_lock); 559 } 560 return ret; 561 } 562 563 void iwl_mvm_protect_session(struct iwl_mvm *mvm, 564 struct ieee80211_vif *vif, 565 u32 duration, u32 min_duration, 566 u32 max_delay, bool wait_for_notif) 567 { 568 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 569 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 570 const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; 571 struct iwl_notification_wait wait_te_notif; 572 struct iwl_time_event_cmd time_cmd = {}; 573 574 lockdep_assert_held(&mvm->mutex); 575 576 if (te_data->running && 577 time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { 578 IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", 579 jiffies_to_msecs(te_data->end_jiffies - jiffies)); 580 return; 581 } 582 583 if (te_data->running) { 584 IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", 585 te_data->uid, 586 jiffies_to_msecs(te_data->end_jiffies - jiffies)); 587 /* 588 * we don't have enough time 589 * cancel the current TE and issue a new one 590 * Of course it would be better to remove the old one only 591 * when the new one is added, but we don't care if we are off 592 * channel for a bit. All we need to do, is not to return 593 * before we actually begin to be on the channel. 594 */ 595 iwl_mvm_stop_session_protection(mvm, vif); 596 } 597 598 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 599 time_cmd.id_and_color = 600 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 601 time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); 602 603 time_cmd.apply_time = cpu_to_le32(0); 604 605 time_cmd.max_frags = TE_V2_FRAG_NONE; 606 time_cmd.max_delay = cpu_to_le32(max_delay); 607 /* TODO: why do we need to interval = bi if it is not periodic? */ 608 time_cmd.interval = cpu_to_le32(1); 609 time_cmd.duration = cpu_to_le32(duration); 610 time_cmd.repeat = 1; 611 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 612 TE_V2_NOTIF_HOST_EVENT_END | 613 TE_V2_START_IMMEDIATELY); 614 615 if (!wait_for_notif) { 616 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 617 return; 618 } 619 620 /* 621 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use 622 * right after we send the time event 623 */ 624 iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, 625 te_notif_response, 626 ARRAY_SIZE(te_notif_response), 627 iwl_mvm_te_notif, te_data); 628 629 /* If TE was sent OK - wait for the notification that started */ 630 if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { 631 IWL_ERR(mvm, "Failed to add TE to protect session\n"); 632 iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); 633 } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, 634 TU_TO_JIFFIES(max_delay))) { 635 IWL_ERR(mvm, "Failed to protect session until TE\n"); 636 } 637 } 638 639 static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, 640 struct iwl_mvm_vif *mvmvif, 641 u32 id) 642 { 643 struct iwl_mvm_session_prot_cmd cmd = { 644 .id_and_color = 645 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 646 mvmvif->color)), 647 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), 648 .conf_id = cpu_to_le32(id), 649 }; 650 int ret; 651 652 ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD, 653 MAC_CONF_GROUP, 0), 654 0, sizeof(cmd), &cmd); 655 if (ret) 656 IWL_ERR(mvm, 657 "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret); 658 } 659 660 static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, 661 struct iwl_mvm_time_event_data *te_data, 662 u32 *uid) 663 { 664 u32 id; 665 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 666 enum nl80211_iftype iftype; 667 668 if (!te_data->vif) 669 return false; 670 671 iftype = te_data->vif->type; 672 673 /* 674 * It is possible that by the time we got to this point the time 675 * event was already removed. 676 */ 677 spin_lock_bh(&mvm->time_event_lock); 678 679 /* Save time event uid before clearing its data */ 680 *uid = te_data->uid; 681 id = te_data->id; 682 683 /* 684 * The clear_data function handles time events that were already removed 685 */ 686 iwl_mvm_te_clear_data(mvm, te_data); 687 spin_unlock_bh(&mvm->time_event_lock); 688 689 /* When session protection is supported, the te_data->id field 690 * is reused to save session protection's configuration. 691 */ 692 if (fw_has_capa(&mvm->fw->ucode_capa, 693 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { 694 if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { 695 /* Session protection is still ongoing. Cancel it */ 696 iwl_mvm_cancel_session_protection(mvm, mvmvif, id); 697 if (iftype == NL80211_IFTYPE_P2P_DEVICE) { 698 iwl_mvm_p2p_roc_finished(mvm); 699 } 700 } 701 return false; 702 } else { 703 /* It is possible that by the time we try to remove it, the 704 * time event has already ended and removed. In such a case 705 * there is no need to send a removal command. 706 */ 707 if (id == TE_MAX) { 708 IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid); 709 return false; 710 } 711 } 712 713 return true; 714 } 715 716 /* 717 * Explicit request to remove a aux roc time event. The removal of a time 718 * event needs to be synchronized with the flow of a time event's end 719 * notification, which also removes the time event from the op mode 720 * data structures. 721 */ 722 static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, 723 struct iwl_mvm_vif *mvmvif, 724 struct iwl_mvm_time_event_data *te_data) 725 { 726 struct iwl_hs20_roc_req aux_cmd = {}; 727 u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm); 728 729 u32 uid; 730 int ret; 731 732 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) 733 return; 734 735 aux_cmd.event_unique_id = cpu_to_le32(uid); 736 aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); 737 aux_cmd.id_and_color = 738 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 739 IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", 740 le32_to_cpu(aux_cmd.event_unique_id)); 741 ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, 742 len, &aux_cmd); 743 744 if (WARN_ON(ret)) 745 return; 746 } 747 748 /* 749 * Explicit request to remove a time event. The removal of a time event needs to 750 * be synchronized with the flow of a time event's end notification, which also 751 * removes the time event from the op mode data structures. 752 */ 753 void iwl_mvm_remove_time_event(struct iwl_mvm *mvm, 754 struct iwl_mvm_vif *mvmvif, 755 struct iwl_mvm_time_event_data *te_data) 756 { 757 struct iwl_time_event_cmd time_cmd = {}; 758 u32 uid; 759 int ret; 760 761 if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid)) 762 return; 763 764 /* When we remove a TE, the UID is to be set in the id field */ 765 time_cmd.id = cpu_to_le32(uid); 766 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); 767 time_cmd.id_and_color = 768 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 769 770 IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); 771 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, 772 sizeof(time_cmd), &time_cmd); 773 if (ret) 774 IWL_ERR(mvm, "Couldn't remove the time event\n"); 775 } 776 777 void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, 778 struct ieee80211_vif *vif) 779 { 780 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 781 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 782 u32 id; 783 784 lockdep_assert_held(&mvm->mutex); 785 786 spin_lock_bh(&mvm->time_event_lock); 787 id = te_data->id; 788 spin_unlock_bh(&mvm->time_event_lock); 789 790 if (fw_has_capa(&mvm->fw->ucode_capa, 791 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { 792 if (id != SESSION_PROTECT_CONF_ASSOC) { 793 IWL_DEBUG_TE(mvm, 794 "don't remove session protection id=%u\n", 795 id); 796 return; 797 } 798 } else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) { 799 IWL_DEBUG_TE(mvm, 800 "don't remove TE with id=%u (not session protection)\n", 801 id); 802 return; 803 } 804 805 iwl_mvm_remove_time_event(mvm, mvmvif, te_data); 806 } 807 808 void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, 809 struct iwl_rx_cmd_buffer *rxb) 810 { 811 struct iwl_rx_packet *pkt = rxb_addr(rxb); 812 struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data; 813 struct ieee80211_vif *vif; 814 struct iwl_mvm_vif *mvmvif; 815 816 rcu_read_lock(); 817 vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id), 818 true); 819 820 if (!vif) 821 goto out_unlock; 822 823 mvmvif = iwl_mvm_vif_from_mac80211(vif); 824 825 /* The vif is not a P2P_DEVICE, maintain its time_event_data */ 826 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { 827 struct iwl_mvm_time_event_data *te_data = 828 &mvmvif->time_event_data; 829 830 if (!le32_to_cpu(notif->status)) { 831 iwl_mvm_te_check_disconnect(mvm, vif, 832 "Session protection failure"); 833 spin_lock_bh(&mvm->time_event_lock); 834 iwl_mvm_te_clear_data(mvm, te_data); 835 spin_unlock_bh(&mvm->time_event_lock); 836 } 837 838 if (le32_to_cpu(notif->start)) { 839 spin_lock_bh(&mvm->time_event_lock); 840 te_data->running = le32_to_cpu(notif->start); 841 te_data->end_jiffies = 842 TU_TO_EXP_TIME(te_data->duration); 843 spin_unlock_bh(&mvm->time_event_lock); 844 } else { 845 /* 846 * By now, we should have finished association 847 * and know the dtim period. 848 */ 849 iwl_mvm_te_check_disconnect(mvm, vif, 850 !vif->bss_conf.assoc ? 851 "Not associated and the session protection is over already..." : 852 "No beacon heard and the session protection is over already..."); 853 spin_lock_bh(&mvm->time_event_lock); 854 iwl_mvm_te_clear_data(mvm, te_data); 855 spin_unlock_bh(&mvm->time_event_lock); 856 } 857 858 goto out_unlock; 859 } 860 861 if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) { 862 /* End TE, notify mac80211 */ 863 mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; 864 ieee80211_remain_on_channel_expired(mvm->hw); 865 iwl_mvm_p2p_roc_finished(mvm); 866 } else if (le32_to_cpu(notif->start)) { 867 if (WARN_ON(mvmvif->time_event_data.id != 868 le32_to_cpu(notif->conf_id))) 869 goto out_unlock; 870 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); 871 ieee80211_ready_on_channel(mvm->hw); /* Start TE */ 872 } 873 874 out_unlock: 875 rcu_read_unlock(); 876 } 877 878 static int 879 iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, 880 struct ieee80211_vif *vif, 881 int duration, 882 enum ieee80211_roc_type type) 883 { 884 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 885 struct iwl_mvm_session_prot_cmd cmd = { 886 .id_and_color = 887 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 888 mvmvif->color)), 889 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 890 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), 891 }; 892 893 lockdep_assert_held(&mvm->mutex); 894 895 /* The time_event_data.id field is reused to save session 896 * protection's configuration. 897 */ 898 switch (type) { 899 case IEEE80211_ROC_TYPE_NORMAL: 900 mvmvif->time_event_data.id = 901 SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV; 902 break; 903 case IEEE80211_ROC_TYPE_MGMT_TX: 904 mvmvif->time_event_data.id = 905 SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION; 906 break; 907 default: 908 WARN_ONCE(1, "Got an invalid ROC type\n"); 909 return -EINVAL; 910 } 911 912 cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); 913 return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD, 914 MAC_CONF_GROUP, 0), 915 0, sizeof(cmd), &cmd); 916 } 917 918 int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 919 int duration, enum ieee80211_roc_type type) 920 { 921 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 922 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 923 struct iwl_time_event_cmd time_cmd = {}; 924 925 lockdep_assert_held(&mvm->mutex); 926 if (te_data->running) { 927 IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n"); 928 return -EBUSY; 929 } 930 931 if (fw_has_capa(&mvm->fw->ucode_capa, 932 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 933 return iwl_mvm_start_p2p_roc_session_protection(mvm, vif, 934 duration, 935 type); 936 937 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 938 time_cmd.id_and_color = 939 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 940 941 switch (type) { 942 case IEEE80211_ROC_TYPE_NORMAL: 943 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL); 944 break; 945 case IEEE80211_ROC_TYPE_MGMT_TX: 946 time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX); 947 break; 948 default: 949 WARN_ONCE(1, "Got an invalid ROC type\n"); 950 return -EINVAL; 951 } 952 953 time_cmd.apply_time = cpu_to_le32(0); 954 time_cmd.interval = cpu_to_le32(1); 955 956 /* 957 * The P2P Device TEs can have lower priority than other events 958 * that are being scheduled by the driver/fw, and thus it might not be 959 * scheduled. To improve the chances of it being scheduled, allow them 960 * to be fragmented, and in addition allow them to be delayed. 961 */ 962 time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS); 963 time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); 964 time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); 965 time_cmd.repeat = 1; 966 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 967 TE_V2_NOTIF_HOST_EVENT_END | 968 TE_V2_START_IMMEDIATELY); 969 970 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 971 } 972 973 static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm) 974 { 975 struct iwl_mvm_time_event_data *te_data; 976 977 lockdep_assert_held(&mvm->mutex); 978 979 spin_lock_bh(&mvm->time_event_lock); 980 981 /* 982 * Iterate over the list of time events and find the time event that is 983 * associated with a P2P_DEVICE interface. 984 * This assumes that a P2P_DEVICE interface can have only a single time 985 * event at any given time and this time event coresponds to a ROC 986 * request 987 */ 988 list_for_each_entry(te_data, &mvm->time_event_list, list) { 989 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) 990 goto out; 991 } 992 993 /* There can only be at most one AUX ROC time event, we just use the 994 * list to simplify/unify code. Remove it if it exists. 995 */ 996 te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, 997 struct iwl_mvm_time_event_data, 998 list); 999 out: 1000 spin_unlock_bh(&mvm->time_event_lock); 1001 return te_data; 1002 } 1003 1004 void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) 1005 { 1006 struct iwl_mvm_time_event_data *te_data; 1007 u32 uid; 1008 1009 te_data = iwl_mvm_get_roc_te(mvm); 1010 if (te_data) 1011 __iwl_mvm_remove_time_event(mvm, te_data, &uid); 1012 } 1013 1014 void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1015 { 1016 struct iwl_mvm_vif *mvmvif; 1017 struct iwl_mvm_time_event_data *te_data; 1018 1019 if (fw_has_capa(&mvm->fw->ucode_capa, 1020 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { 1021 mvmvif = iwl_mvm_vif_from_mac80211(vif); 1022 1023 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1024 iwl_mvm_cancel_session_protection(mvm, mvmvif, 1025 mvmvif->time_event_data.id); 1026 iwl_mvm_p2p_roc_finished(mvm); 1027 } else { 1028 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, 1029 &mvmvif->time_event_data); 1030 iwl_mvm_roc_finished(mvm); 1031 } 1032 1033 return; 1034 } 1035 1036 te_data = iwl_mvm_get_roc_te(mvm); 1037 if (!te_data) { 1038 IWL_WARN(mvm, "No remain on channel event\n"); 1039 return; 1040 } 1041 1042 mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 1043 1044 if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1045 iwl_mvm_remove_time_event(mvm, mvmvif, te_data); 1046 iwl_mvm_p2p_roc_finished(mvm); 1047 } else { 1048 iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); 1049 iwl_mvm_roc_finished(mvm); 1050 } 1051 } 1052 1053 void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm, 1054 struct ieee80211_vif *vif) 1055 { 1056 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1057 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 1058 u32 id; 1059 1060 lockdep_assert_held(&mvm->mutex); 1061 1062 spin_lock_bh(&mvm->time_event_lock); 1063 id = te_data->id; 1064 spin_unlock_bh(&mvm->time_event_lock); 1065 1066 if (id != TE_CHANNEL_SWITCH_PERIOD) 1067 return; 1068 1069 iwl_mvm_remove_time_event(mvm, mvmvif, te_data); 1070 } 1071 1072 int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, 1073 struct ieee80211_vif *vif, 1074 u32 duration, u32 apply_time) 1075 { 1076 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1077 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 1078 struct iwl_time_event_cmd time_cmd = {}; 1079 1080 lockdep_assert_held(&mvm->mutex); 1081 1082 if (te_data->running) { 1083 u32 id; 1084 1085 spin_lock_bh(&mvm->time_event_lock); 1086 id = te_data->id; 1087 spin_unlock_bh(&mvm->time_event_lock); 1088 1089 if (id == TE_CHANNEL_SWITCH_PERIOD) { 1090 IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); 1091 return -EBUSY; 1092 } 1093 1094 /* 1095 * Remove the session protection time event to allow the 1096 * channel switch. If we got here, we just heard a beacon so 1097 * the session protection is not needed anymore anyway. 1098 */ 1099 iwl_mvm_remove_time_event(mvm, mvmvif, te_data); 1100 } 1101 1102 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 1103 time_cmd.id_and_color = 1104 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 1105 time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD); 1106 time_cmd.apply_time = cpu_to_le32(apply_time); 1107 time_cmd.max_frags = TE_V2_FRAG_NONE; 1108 time_cmd.duration = cpu_to_le32(duration); 1109 time_cmd.repeat = 1; 1110 time_cmd.interval = cpu_to_le32(1); 1111 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 1112 TE_V2_ABSENCE); 1113 if (!apply_time) 1114 time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); 1115 1116 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 1117 } 1118 1119 static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, 1120 struct iwl_rx_packet *pkt, void *data) 1121 { 1122 struct iwl_mvm *mvm = 1123 container_of(notif_wait, struct iwl_mvm, notif_wait); 1124 struct iwl_mvm_session_prot_notif *resp; 1125 int resp_len = iwl_rx_packet_payload_len(pkt); 1126 1127 if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF || 1128 pkt->hdr.group_id != MAC_CONF_GROUP)) 1129 return true; 1130 1131 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 1132 IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n"); 1133 return true; 1134 } 1135 1136 resp = (void *)pkt->data; 1137 1138 if (!resp->status) 1139 IWL_ERR(mvm, 1140 "TIME_EVENT_NOTIFICATION received but not executed\n"); 1141 1142 return true; 1143 } 1144 1145 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, 1146 struct ieee80211_vif *vif, 1147 u32 duration, u32 min_duration, 1148 bool wait_for_notif) 1149 { 1150 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1151 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 1152 const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF, 1153 MAC_CONF_GROUP, 0) }; 1154 struct iwl_notification_wait wait_notif; 1155 struct iwl_mvm_session_prot_cmd cmd = { 1156 .id_and_color = 1157 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 1158 mvmvif->color)), 1159 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 1160 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), 1161 }; 1162 1163 /* The time_event_data.id field is reused to save session 1164 * protection's configuration. 1165 */ 1166 mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC; 1167 cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); 1168 1169 lockdep_assert_held(&mvm->mutex); 1170 1171 spin_lock_bh(&mvm->time_event_lock); 1172 if (te_data->running && 1173 time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { 1174 IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", 1175 jiffies_to_msecs(te_data->end_jiffies - jiffies)); 1176 spin_unlock_bh(&mvm->time_event_lock); 1177 1178 return; 1179 } 1180 1181 iwl_mvm_te_clear_data(mvm, te_data); 1182 te_data->duration = le32_to_cpu(cmd.duration_tu); 1183 te_data->vif = vif; 1184 spin_unlock_bh(&mvm->time_event_lock); 1185 1186 IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", 1187 le32_to_cpu(cmd.duration_tu)); 1188 1189 if (!wait_for_notif) { 1190 if (iwl_mvm_send_cmd_pdu(mvm, 1191 iwl_cmd_id(SESSION_PROTECTION_CMD, 1192 MAC_CONF_GROUP, 0), 1193 0, sizeof(cmd), &cmd)) { 1194 IWL_ERR(mvm, 1195 "Couldn't send the SESSION_PROTECTION_CMD\n"); 1196 spin_lock_bh(&mvm->time_event_lock); 1197 iwl_mvm_te_clear_data(mvm, te_data); 1198 spin_unlock_bh(&mvm->time_event_lock); 1199 } 1200 1201 return; 1202 } 1203 1204 iwl_init_notification_wait(&mvm->notif_wait, &wait_notif, 1205 notif, ARRAY_SIZE(notif), 1206 iwl_mvm_session_prot_notif, NULL); 1207 1208 if (iwl_mvm_send_cmd_pdu(mvm, 1209 iwl_cmd_id(SESSION_PROTECTION_CMD, 1210 MAC_CONF_GROUP, 0), 1211 0, sizeof(cmd), &cmd)) { 1212 IWL_ERR(mvm, 1213 "Couldn't send the SESSION_PROTECTION_CMD\n"); 1214 iwl_remove_notification(&mvm->notif_wait, &wait_notif); 1215 } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif, 1216 TU_TO_JIFFIES(100))) { 1217 IWL_ERR(mvm, 1218 "Failed to protect session until session protection\n"); 1219 } 1220 } 1221