// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2017 Intel Deutschland GmbH
 */
#include <linux/jiffies.h>
#include <net/mac80211.h>

#include "fw/notif-wait.h"
#include "iwl-trans.h"
#include "fw-api.h"
#include "time-event.h"
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"

/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC

void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
			   struct iwl_mvm_time_event_data *te_data)
{
	lockdep_assert_held(&mvm->time_event_lock);

	if (!te_data || !te_data->vif)
		return;

	list_del(&te_data->list);

	/*
	 * the list is only used for AUX ROC events so make sure it is always
	 * initialized
	 */
	INIT_LIST_HEAD(&te_data->list);

	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;
	te_data->vif = NULL;
}
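
/*
 * Deferred work for tearing down a remain-on-channel period: it runs in
 * process context (it takes mvm->mutex), clears the ROC status bits,
 * synchronizes the TX path and then flushes the frames that were queued
 * for the off-channel time event (the P2P Device broadcast station or
 * the Hot Spot 2.0 aux station).
 */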
void iwl_mvm_roc_done_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);

	/*
	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
	 * This will cause the TX path to drop offchannel transmissions.
	 * That would also be done by mac80211, but it is racy, in particular
	 * in the case that the time event actually completed in the firmware
	 * (which is handled in iwl_mvm_te_handle_notif).
	 */
	clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
	clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);

	synchronize_net();

	/*
	 * Flush the offchannel queue -- this is called when the time
	 * event finishes or is canceled, so that frames queued for it
	 * won't get stuck on the queue and be transmitted in the next
	 * time event.
	 */

	mutex_lock(&mvm->mutex);
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		/*
		 * NB: access to this pointer would be racy, but the flush bit
		 * can only be set when we had a P2P-Device VIF, and we have a
		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
		 * not really racy.
		 */

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
		}
	} else {
		/* do the same in case of hot spot 2.0 */
		iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
		/* In newer versions of this command an aux station is added
		 * only when there is a dedicated TX queue, and it needs to be
		 * removed at the end of its use.
		 */
		if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
					  ADD_STA, 0) >= 12)
			iwl_mvm_rm_aux_sta(mvm);
	}

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * Of course, our status bit is just as racy as mac80211, so in
	 * addition, fire off the work struct which will drop all frames
	 * from the hardware queues that made it through the race. First
	 * it will of course synchronize the TX path to make sure that
	 * any *new* TX will be rejected.
	 */
	schedule_work(&mvm->roc_done_wk);
}

static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
{
	struct ieee80211_vif *csa_vif;

	rcu_read_lock();

	csa_vif = rcu_dereference(mvm->csa_vif);
	if (!csa_vif || !csa_vif->csa_active)
		goto out_unlock;

	IWL_DEBUG_TE(mvm, "CSA NOA started\n");

	/*
	 * CSA NoA is started but we still have beacons to
	 * transmit on the current channel.
	 * So we just do nothing here and the switch
	 * will be performed on the last TBTT.
	 */
	if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
		IWL_WARN(mvm, "CSA NOA started too early\n");
		goto out_unlock;
	}

	ieee80211_csa_finish(csa_vif);

	rcu_read_unlock();

	RCU_INIT_POINTER(mvm->csa_vif, NULL);

	return;

out_unlock:
	rcu_read_unlock();
}

static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const char *errmsg)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return false;

	if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
	    vif->bss_conf.dtim_period)
		return false;
	if (errmsg)
		IWL_ERR(mvm, "%s\n", errmsg);

	if (mvmvif->csa_bcn_pending) {
		struct iwl_mvm_sta *mvmsta;

		rcu_read_lock();
		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
		if (!WARN_ON(!mvmsta))
			iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
		rcu_read_unlock();
	}

	if (vif->bss_conf.assoc) {
		/*
		 * When not associated, this will be called from
		 * iwl_mvm_event_mlme_callback_ini()
		 */
		iwl_dbg_tlv_time_point(&mvm->fwrt,
				       IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
				       NULL);
	}

	iwl_mvm_connection_loss(mvm, vif, errmsg);
	return true;
}

static void
iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
			     struct iwl_mvm_time_event_data *te_data,
			     struct iwl_time_event_notif *notif)
{
	struct ieee80211_vif *vif = te_data->vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (!notif->status)
		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");

	switch (te_data->vif->type) {
	case NL80211_IFTYPE_AP:
		if (!notif->status)
			mvmvif->csa_failed = true;
		iwl_mvm_csa_noa_start(mvm);
		break;
	case NL80211_IFTYPE_STATION:
		if (!notif->status) {
			iwl_mvm_connection_loss(mvm, vif,
						"CSA TE failed to start");
			break;
		}
		iwl_mvm_csa_client_absent(mvm, te_data->vif);
		cancel_delayed_work(&mvmvif->csa_work);
		ieee80211_chswitch_done(te_data->vif, true);
		break;
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		break;
	}

	/* we don't need it anymore */
	iwl_mvm_te_clear_data(mvm, te_data);
}
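
/*
 * Check whether a firmware debug trigger of type TIME_EVENT is
 * configured for this vif and, if its id/action/status filters match
 * the notification, kick off a firmware debug data collection.
 */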
static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
				     struct iwl_time_event_notif *notif,
				     struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_time_event *te_trig;
	int i;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
				     ieee80211_vif_to_wdev(te_data->vif),
				     FW_DBG_TRIGGER_TIME_EVENT);
	if (!trig)
		return;

	te_trig = (void *)trig->data;

	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
		u32 trig_action_bitmap =
			le32_to_cpu(te_trig->time_events[i].action_bitmap);
		u32 trig_status_bitmap =
			le32_to_cpu(te_trig->time_events[i].status_bitmap);

		if (trig_te_id != te_data->id ||
		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
			continue;

		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
					"Time event %d Action 0x%x received status: %d",
					te_data->id,
					le32_to_cpu(notif->action),
					le32_to_cpu(notif->status));
		break;
	}
}

static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
{
	/*
	 * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
	 * roc_done_wk is already scheduled or running, so don't schedule it
	 * again to avoid a race where the roc_done_wk clears this bit after
	 * it is set here, affecting the next run of the roc_done_wk.
	 */
	if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
		iwl_mvm_roc_finished(mvm);
}

/*
 * Handles a FW notification for an event that is known to the driver.
 *
 * @mvm: the mvm component
 * @te_data: the time event data
 * @notif: the notification data corresponding to the time event data.
 */
static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
				    struct iwl_mvm_time_event_data *te_data,
				    struct iwl_time_event_notif *notif)
{
	lockdep_assert_held(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	/*
	 * The FW sends the start/end time event notifications even for events
	 * that it fails to schedule. This is indicated in the status field of
	 * the notification. This happens in cases that the scheduler cannot
	 * find a schedule that can handle the event (for example requesting a
	 * P2P Device discoverability, while there are other higher priority
	 * events in the system).
	 */
	if (!le32_to_cpu(notif->status)) {
		const char *msg;

		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
			msg = "Time Event start notification failure";
		else
			msg = "Time Event end notification failure";

		IWL_DEBUG_TE(mvm, "%s\n", msg);

		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
			iwl_mvm_te_clear_data(mvm, te_data);
			return;
		}
	}

	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
		IWL_DEBUG_TE(mvm,
			     "TE ended - current time %lu, estimated end %lu\n",
			     jiffies, te_data->end_jiffies);

		switch (te_data->vif->type) {
		case NL80211_IFTYPE_P2P_DEVICE:
			ieee80211_remain_on_channel_expired(mvm->hw);
			iwl_mvm_p2p_roc_finished(mvm);
			break;
		case NL80211_IFTYPE_STATION:
			/*
			 * If we are switching channel, don't disconnect
			 * if the time event is already done. Beacons can
			 * be delayed a bit after the switch.
			 */
			if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
				IWL_DEBUG_TE(mvm,
					     "No beacon heard and the CS time event is over, don't disconnect\n");
				break;
			}

			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
				!te_data->vif->bss_conf.assoc ?
				"Not associated and the time event is over already..." :
				"No beacon heard and the time event is over already...");
			break;
		default:
			break;
		}

		iwl_mvm_te_clear_data(mvm, te_data);
	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
		te_data->running = true;
		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);

		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
			ieee80211_ready_on_channel(mvm->hw);
		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
		}
	} else {
		IWL_WARN(mvm, "Got TE with unknown action\n");
	}
}

/*
 * Handle an Aux ROC time event
 */
static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
					   struct iwl_time_event_notif *notif)
{
	struct iwl_mvm_time_event_data *te_data, *tmp;
	bool aux_roc_te = false;

	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
			aux_roc_te = true;
			break;
		}
	}
	if (!aux_roc_te) /* Not an Aux ROC time event */
		return -EINVAL;

	iwl_mvm_te_check_trigger(mvm, notif, te_data);

	IWL_DEBUG_TE(mvm,
		     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));

	if (!le32_to_cpu(notif->status) ||
	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
		/* End TE, notify mac80211 */
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_roc_finished(mvm); /* flush aux queue */
		list_del(&te_data->list); /* remove from list */
		te_data->running = false;
		te_data->vif = NULL;
		te_data->uid = 0;
		te_data->id = TE_MAX;
	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
		te_data->running = true;
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	} else {
		IWL_DEBUG_TE(mvm,
			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
			     le32_to_cpu(notif->action));
		return -EINVAL;
	}

	return 0;
}

/*
 * The Rx handler for time event notifications
 */
void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_time_event_notif *notif = (void *)pkt->data;
	struct iwl_mvm_time_event_data *te_data, *tmp;

	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
		     le32_to_cpu(notif->unique_id),
		     le32_to_cpu(notif->action));

	spin_lock_bh(&mvm->time_event_lock);
	/* This time event is triggered for Aux ROC request */
	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
		goto unlock;

	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
		if (le32_to_cpu(notif->unique_id) == te_data->uid)
			iwl_mvm_te_handle_notif(mvm, te_data, notif);
	}
unlock:
	spin_unlock_bh(&mvm->time_event_lock);
}
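
/*
 * notif_wait handler for TIME_EVENT_NOTIFICATION: lets
 * iwl_mvm_protect_session() block until the notification for the time
 * event identified by te_data->uid has been processed.
 */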
static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
			     struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* te_data->uid is already set in the TIME_EVENT_CMD response */
	if (le32_to_cpu(resp->unique_id) != te_data->uid)
		return false;

	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
		     te_data->uid);
	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
					struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_time_event_data *te_data = data;
	struct iwl_time_event_resp *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
		return true;
	}

	resp = (void *)pkt->data;

	/* we should never get a response to another TIME_EVENT_CMD here */
	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
		return false;

	te_data->uid = le32_to_cpu(resp->unique_id);
	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
		     te_data->uid);
	return true;
}

static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_time_event_data *te_data,
				       struct iwl_time_event_cmd *te_cmd)
{
	static const u16 time_event_response[] = { TIME_EVENT_CMD };
	struct iwl_notification_wait wait_time_event;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
		     le32_to_cpu(te_cmd->duration));

	spin_lock_bh(&mvm->time_event_lock);
	if (WARN_ON(te_data->id != TE_MAX)) {
		spin_unlock_bh(&mvm->time_event_lock);
		return -EIO;
	}
	te_data->vif = vif;
	te_data->duration = le32_to_cpu(te_cmd->duration);
	te_data->id = le32_to_cpu(te_cmd->id);
	list_add_tail(&te_data->list, &mvm->time_event_list);
	spin_unlock_bh(&mvm->time_event_lock);

	/*
	 * Use a notification wait, which really just processes the
	 * command response and doesn't wait for anything, in order
	 * to be able to process the response and get the UID inside
	 * the RX path. Using CMD_WANT_SKB doesn't work because it
	 * stores the buffer and then wakes up this thread, by which
	 * time another notification (that the time event started)
	 * might already be processed unsuccessfully.
535 */ 536 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 537 time_event_response, 538 ARRAY_SIZE(time_event_response), 539 iwl_mvm_time_event_response, te_data); 540 541 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0, 542 sizeof(*te_cmd), te_cmd); 543 if (ret) { 544 IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); 545 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 546 goto out_clear_te; 547 } 548 549 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 550 ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 551 /* should never fail */ 552 WARN_ON_ONCE(ret); 553 554 if (ret) { 555 out_clear_te: 556 spin_lock_bh(&mvm->time_event_lock); 557 iwl_mvm_te_clear_data(mvm, te_data); 558 spin_unlock_bh(&mvm->time_event_lock); 559 } 560 return ret; 561 } 562 563 void iwl_mvm_protect_session(struct iwl_mvm *mvm, 564 struct ieee80211_vif *vif, 565 u32 duration, u32 min_duration, 566 u32 max_delay, bool wait_for_notif) 567 { 568 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 569 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 570 const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; 571 struct iwl_notification_wait wait_te_notif; 572 struct iwl_time_event_cmd time_cmd = {}; 573 574 lockdep_assert_held(&mvm->mutex); 575 576 if (te_data->running && 577 time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) { 578 IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n", 579 jiffies_to_msecs(te_data->end_jiffies - jiffies)); 580 return; 581 } 582 583 if (te_data->running) { 584 IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n", 585 te_data->uid, 586 jiffies_to_msecs(te_data->end_jiffies - jiffies)); 587 /* 588 * we don't have enough time 589 * cancel the current TE and issue a new one 590 * Of course it would be better to remove the old one only 591 * when the new one is added, but we don't care if we are off 592 * channel for a bit. All we need to do, is not to return 593 * before we actually begin to be on the channel. 594 */ 595 iwl_mvm_stop_session_protection(mvm, vif); 596 } 597 598 time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); 599 time_cmd.id_and_color = 600 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); 601 time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); 602 603 time_cmd.apply_time = cpu_to_le32(0); 604 605 time_cmd.max_frags = TE_V2_FRAG_NONE; 606 time_cmd.max_delay = cpu_to_le32(max_delay); 607 /* TODO: why do we need to interval = bi if it is not periodic? 
void iwl_mvm_protect_session(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     u32 duration, u32 min_duration,
			     u32 max_delay, bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
	struct iwl_notification_wait wait_te_notif;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		return;
	}

	if (te_data->running) {
		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
			     te_data->uid,
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		/*
		 * We don't have enough time, so cancel the current TE and
		 * issue a new one. Of course it would be better to remove
		 * the old one only when the new one is added, but we don't
		 * care if we are off channel for a bit. All we need to do
		 * is not to return before we actually begin to be on the
		 * channel.
		 */
		iwl_mvm_stop_session_protection(mvm, vif);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = cpu_to_le32(0);

	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.max_delay = cpu_to_le32(max_delay);
	/* TODO: why do we need interval = bi if it is not periodic? */
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	if (!wait_for_notif) {
		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
		return;
	}

	/*
	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
	 * right after we send the time event
	 */
	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
				   te_notif_response,
				   ARRAY_SIZE(te_notif_response),
				   iwl_mvm_te_notif, te_data);

	/* If TE was sent OK - wait for the notification that started */
	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
		IWL_ERR(mvm, "Failed to add TE to protect session\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
					 TU_TO_JIFFIES(max_delay))) {
		IWL_ERR(mvm, "Failed to protect session until TE\n");
	}
}

static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      u32 id)
{
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
		.conf_id = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
						   MAC_CONF_GROUP, 0),
				   0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm,
			"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}

static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
					struct iwl_mvm_time_event_data *te_data,
					u32 *uid)
{
	u32 id;
	struct iwl_mvm_vif *mvmvif;
	enum nl80211_iftype iftype;

	if (!te_data->vif)
		return false;

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
	iftype = te_data->vif->type;

	/*
	 * It is possible that by the time we got to this point the time
	 * event was already removed.
	 */
	spin_lock_bh(&mvm->time_event_lock);

	/* Save time event uid before clearing its data */
	*uid = te_data->uid;
	id = te_data->id;

	/*
	 * The clear_data function handles time events that were already removed
	 */
	iwl_mvm_te_clear_data(mvm, te_data);
	spin_unlock_bh(&mvm->time_event_lock);

	/* When session protection is supported, the te_data->id field
	 * is reused to save session protection's configuration.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
			/* Session protection is still ongoing. Cancel it */
			iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
			if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
				iwl_mvm_p2p_roc_finished(mvm);
			}
		}
		return false;
	} else {
		/* It is possible that by the time we try to remove it, the
		 * time event has already ended and removed. In such a case
		 * there is no need to send a removal command.
		 */
		if (id == TE_MAX) {
			IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
			return false;
		}
	}

	return true;
}

/*
 * Explicit request to remove an aux ROC time event. The removal of a time
 * event needs to be synchronized with the flow of a time event's end
 * notification, which also removes the time event from the op mode
 * data structures.
 */
static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
				      struct iwl_mvm_vif *mvmvif,
				      struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_hs20_roc_req aux_cmd = {};
	u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm);

	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	aux_cmd.event_unique_id = cpu_to_le32(uid);
	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	aux_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
		     le32_to_cpu(aux_cmd.event_unique_id));
	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
				   len, &aux_cmd);

	if (WARN_ON(ret))
		return;
}

/*
 * Explicit request to remove a time event. The removal of a time event needs to
 * be synchronized with the flow of a time event's end notification, which also
 * removes the time event from the op mode data structures.
 */
void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			       struct iwl_mvm_vif *mvmvif,
			       struct iwl_mvm_time_event_data *te_data)
{
	struct iwl_time_event_cmd time_cmd = {};
	u32 uid;
	int ret;

	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
		return;

	/* When we remove a TE, the UID is to be set in the id field */
	time_cmd.id = cpu_to_le32(uid);
	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
				   sizeof(time_cmd), &time_cmd);
	if (ret)
		IWL_ERR(mvm, "Couldn't remove the time event\n");
}

void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	u32 id;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		if (id != SESSION_PROTECT_CONF_ASSOC) {
			IWL_DEBUG_TE(mvm,
				     "don't remove session protection id=%u\n",
				     id);
			return;
		}
	} else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
		IWL_DEBUG_TE(mvm,
			     "don't remove TE with id=%u (not session protection)\n",
			     id);
		return;
	}

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
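
/*
 * Rx handler for SESSION_PROTECTION_NOTIF: for regular interfaces it
 * updates the vif's time_event_data (or triggers a disconnect check on
 * failure/end); for a P2P Device it translates start/end into the
 * mac80211 ready_on_channel/remain_on_channel_expired callbacks.
 */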
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
				      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
	struct ieee80211_vif *vif;
	struct iwl_mvm_vif *mvmvif;

	rcu_read_lock();
	vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
					     true);

	if (!vif)
		goto out_unlock;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* The vif is not a P2P_DEVICE, maintain its time_event_data */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
		struct iwl_mvm_time_event_data *te_data =
			&mvmvif->time_event_data;

		if (!le32_to_cpu(notif->status)) {
			iwl_mvm_te_check_disconnect(mvm, vif,
						    "Session protection failure");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		if (le32_to_cpu(notif->start)) {
			spin_lock_bh(&mvm->time_event_lock);
			te_data->running = le32_to_cpu(notif->start);
			te_data->end_jiffies =
				TU_TO_EXP_TIME(te_data->duration);
			spin_unlock_bh(&mvm->time_event_lock);
		} else {
			/*
			 * By now, we should have finished association
			 * and know the dtim period.
			 */
			iwl_mvm_te_check_disconnect(mvm, vif,
				!vif->bss_conf.assoc ?
				"Not associated and the session protection is over already..." :
				"No beacon heard and the session protection is over already...");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		goto out_unlock;
	}

	if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
		/* End TE, notify mac80211 */
		mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
		ieee80211_remain_on_channel_expired(mvm->hw);
		iwl_mvm_p2p_roc_finished(mvm);
	} else if (le32_to_cpu(notif->start)) {
		if (WARN_ON(mvmvif->time_event_data.id !=
				le32_to_cpu(notif->conf_id)))
			goto out_unlock;
		set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
	}

out_unlock:
	rcu_read_unlock();
}

static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 int duration,
					 enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	lockdep_assert_held(&mvm->mutex);

	/* The time_event_data.id field is reused to save session
	 * protection's configuration.
	 */
	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		mvmvif->time_event_data.id =
			SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV;
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		mvmvif->time_event_data.id =
			SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION;
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);
	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
						    MAC_CONF_GROUP, 0),
				    0, sizeof(cmd), &cmd);
}
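
/*
 * Start a remain-on-channel period on a P2P Device interface; duration
 * is given in msec and converted to TU. Firmware with the session
 * protection capability uses SESSION_PROTECTION_CMD instead of a time
 * event.
 */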
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  int duration, enum ieee80211_roc_type type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);
	if (te_data->running) {
		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
		return -EBUSY;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
		return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
								duration,
								type);

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));

	switch (type) {
	case IEEE80211_ROC_TYPE_NORMAL:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
		break;
	case IEEE80211_ROC_TYPE_MGMT_TX:
		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
		break;
	default:
		WARN_ONCE(1, "Got an invalid ROC type\n");
		return -EINVAL;
	}

	time_cmd.apply_time = cpu_to_le32(0);
	time_cmd.interval = cpu_to_le32(1);

	/*
	 * The P2P Device TEs can have lower priority than other events
	 * that are being scheduled by the driver/fw, and thus it might not be
	 * scheduled. To improve the chances of it being scheduled, allow them
	 * to be fragmented, and in addition allow them to be delayed.
	 */
	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
	time_cmd.repeat = 1;
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_NOTIF_HOST_EVENT_END |
				      TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);

	/*
	 * Iterate over the list of time events and find the time event that is
	 * associated with a P2P_DEVICE interface.
	 * This assumes that a P2P_DEVICE interface can have only a single time
	 * event at any given time and this time event corresponds to a ROC
	 * request
	 */
	list_for_each_entry(te_data, &mvm->time_event_list, list) {
		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
			goto out;
	}

	/* There can only be at most one AUX ROC time event, we just use the
	 * list to simplify/unify code. Remove it if it exists.
	 */
	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
					   struct iwl_mvm_time_event_data,
					   list);
out:
	spin_unlock_bh(&mvm->time_event_lock);
	return te_data;
}

void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
{
	struct iwl_mvm_time_event_data *te_data;
	u32 uid;

	te_data = iwl_mvm_get_roc_te(mvm);
	if (te_data)
		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}

void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_time_event_data *te_data;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		mvmvif = iwl_mvm_vif_from_mac80211(vif);

		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			iwl_mvm_cancel_session_protection(mvm, mvmvif,
							  mvmvif->time_event_data.id);
			iwl_mvm_p2p_roc_finished(mvm);
		} else {
			iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
						  &mvmvif->time_event_data);
			iwl_mvm_roc_finished(mvm);
		}

		return;
	}

	te_data = iwl_mvm_get_roc_te(mvm);
	if (!te_data) {
		IWL_WARN(mvm, "No remain on channel event\n");
		return;
	}

	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);

	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
		iwl_mvm_p2p_roc_finished(mvm);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
		iwl_mvm_roc_finished(mvm);
	}
}

void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
			       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	u32 id;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (id != TE_CHANNEL_SWITCH_PERIOD)
		return;

	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}

int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
				struct ieee80211_vif *vif,
				u32 duration, u32 apply_time)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	struct iwl_time_event_cmd time_cmd = {};

	lockdep_assert_held(&mvm->mutex);

	if (te_data->running) {
		u32 id;

		spin_lock_bh(&mvm->time_event_lock);
		id = te_data->id;
		spin_unlock_bh(&mvm->time_event_lock);

		if (id == TE_CHANNEL_SWITCH_PERIOD) {
			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
			return -EBUSY;
		}

		/*
		 * Remove the session protection time event to allow the
		 * channel switch. If we got here, we just heard a beacon so
		 * the session protection is not needed anymore anyway.
		 */
		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
	}

	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
	time_cmd.apply_time = cpu_to_le32(apply_time);
	time_cmd.max_frags = TE_V2_FRAG_NONE;
	time_cmd.duration = cpu_to_le32(duration);
	time_cmd.repeat = 1;
	time_cmd.interval = cpu_to_le32(1);
	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				      TE_V2_ABSENCE);
	if (!apply_time)
		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);

	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}

static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
				       struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_session_prot_notif *resp;
	int resp_len = iwl_rx_packet_payload_len(pkt);

	if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
		    pkt->hdr.group_id != MAC_CONF_GROUP))
		return true;

	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
		IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
		return true;
	}

	resp = (void *)pkt->data;

	if (!resp->status)
		IWL_ERR(mvm,
			"TIME_EVENT_NOTIFICATION received but not executed\n");

	return true;
}

void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
					 struct ieee80211_vif *vif,
					 u32 duration, u32 min_duration,
					 bool wait_for_notif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
	const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
					 MAC_CONF_GROUP, 0) };
	struct iwl_notification_wait wait_notif;
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
	};

	/* The time_event_data.id field is reused to save session
	 * protection's configuration.
	 */
	mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC;
	cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id);

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->time_event_lock);
	if (te_data->running &&
	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
		spin_unlock_bh(&mvm->time_event_lock);

		return;
	}

	iwl_mvm_te_clear_data(mvm, te_data);
	te_data->duration = le32_to_cpu(cmd.duration_tu);
	te_data->vif = vif;
	spin_unlock_bh(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
		     le32_to_cpu(cmd.duration_tu));

	if (!wait_for_notif) {
		if (iwl_mvm_send_cmd_pdu(mvm,
					 iwl_cmd_id(SESSION_PROTECTION_CMD,
						    MAC_CONF_GROUP, 0),
					 0, sizeof(cmd), &cmd)) {
			IWL_ERR(mvm,
				"Couldn't send the SESSION_PROTECTION_CMD\n");
			spin_lock_bh(&mvm->time_event_lock);
			iwl_mvm_te_clear_data(mvm, te_data);
			spin_unlock_bh(&mvm->time_event_lock);
		}

		return;
	}

	iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
				   notif, ARRAY_SIZE(notif),
				   iwl_mvm_session_prot_notif, NULL);

	if (iwl_mvm_send_cmd_pdu(mvm,
				 iwl_cmd_id(SESSION_PROTECTION_CMD,
					    MAC_CONF_GROUP, 0),
				 0, sizeof(cmd), &cmd)) {
		IWL_ERR(mvm,
			"Couldn't send the SESSION_PROTECTION_CMD\n");
		iwl_remove_notification(&mvm->notif_wait, &wait_notif);
	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
					 TU_TO_JIFFIES(100))) {
		IWL_ERR(mvm,
			"Failed to protect session until session protection\n");
	}
}