1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 * Copyright(c) 2018 - 2019 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of version 2 of the GNU General Public License as 15 * published by the Free Software Foundation. 16 * 17 * This program is distributed in the hope that it will be useful, but 18 * WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 * General Public License for more details. 21 * 22 * The full GNU General Public License is included in this distribution 23 * in the file called COPYING. 24 * 25 * Contact Information: 26 * Intel Linux Wireless <linuxwifi@intel.com> 27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 28 * 29 * BSD LICENSE 30 * 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 34 * Copyright(c) 2018 - 2019 Intel Corporation 35 * All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 41 * * Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * * Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in 45 * the documentation and/or other materials provided with the 46 * distribution. 47 * * Neither the name Intel Corporation nor the names of its 48 * contributors may be used to endorse or promote products derived 49 * from this software without specific prior written permission. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 62 * 63 *****************************************************************************/ 64 65 #include <linux/etherdevice.h> 66 #include <net/mac80211.h> 67 68 #include "mvm.h" 69 #include "fw/api/scan.h" 70 #include "iwl-io.h" 71 72 #define IWL_DENSE_EBS_SCAN_RATIO 5 73 #define IWL_SPARSE_EBS_SCAN_RATIO 1 74 75 #define IWL_SCAN_DWELL_ACTIVE 10 76 #define IWL_SCAN_DWELL_PASSIVE 110 77 #define IWL_SCAN_DWELL_FRAGMENTED 44 78 #define IWL_SCAN_DWELL_EXTENDED 90 79 #define IWL_SCAN_NUM_OF_FRAGS 3 80 #define IWL_SCAN_LAST_2_4_CHN 14 81 82 /* adaptive dwell max budget time [TU] for full scan */ 83 #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 84 /* adaptive dwell max budget time [TU] for directed scan */ 85 #define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 86 /* adaptive dwell default high band APs number */ 87 #define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8 88 /* adaptive dwell default low band APs number */ 89 #define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2 90 /* adaptive dwell default APs number in social channels (1, 6, 11) */ 91 #define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 92 /* number of scan channels */ 93 #define IWL_SCAN_NUM_CHANNELS 112 94 /* adaptive dwell default number of APs override */ 95 #define IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE 10 96 97 struct iwl_mvm_scan_timing_params { 98 u32 suspend_time; 99 u32 max_out_time; 100 }; 101 102 static struct iwl_mvm_scan_timing_params scan_timing[] = { 103 [IWL_SCAN_TYPE_UNASSOC] = { 104 .suspend_time = 0, 105 .max_out_time = 0, 106 }, 107 [IWL_SCAN_TYPE_WILD] = { 108 .suspend_time = 30, 109 .max_out_time = 120, 110 }, 111 [IWL_SCAN_TYPE_MILD] = { 112 .suspend_time = 120, 113 .max_out_time = 120, 114 }, 115 [IWL_SCAN_TYPE_FRAGMENTED] = { 116 .suspend_time = 95, 117 .max_out_time = 44, 118 }, 119 [IWL_SCAN_TYPE_FAST_BALANCE] = { 120 .suspend_time = 30, 121 .max_out_time = 37, 122 }, 123 }; 124 125 struct iwl_mvm_scan_params { 126 /* For CDB this is low band scan type, for non-CDB - type. */ 127 enum iwl_mvm_scan_type type; 128 enum iwl_mvm_scan_type hb_type; 129 u32 n_channels; 130 u16 delay; 131 int n_ssids; 132 struct cfg80211_ssid *ssids; 133 struct ieee80211_channel **channels; 134 u32 flags; 135 u8 *mac_addr; 136 u8 *mac_addr_mask; 137 bool no_cck; 138 bool pass_all; 139 int n_match_sets; 140 struct iwl_scan_probe_req preq; 141 struct cfg80211_match_set *match_sets; 142 int n_scan_plans; 143 struct cfg80211_sched_scan_plan *scan_plans; 144 u32 measurement_dwell; 145 }; 146 147 static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) 148 { 149 struct iwl_scan_req_umac *cmd = mvm->scan_cmd; 150 151 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) 152 return (void *)&cmd->v8.data; 153 154 if (iwl_mvm_is_adaptive_dwell_supported(mvm)) 155 return (void *)&cmd->v7.data; 156 157 if (iwl_mvm_cdb_scan_api(mvm)) 158 return (void *)&cmd->v6.data; 159 160 return (void *)&cmd->v1.data; 161 } 162 163 static inline struct iwl_scan_umac_chan_param * 164 iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm) 165 { 166 struct iwl_scan_req_umac *cmd = mvm->scan_cmd; 167 168 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) 169 return &cmd->v8.channel; 170 171 if (iwl_mvm_is_adaptive_dwell_supported(mvm)) 172 return &cmd->v7.channel; 173 174 if (iwl_mvm_cdb_scan_api(mvm)) 175 return &cmd->v6.channel; 176 177 return &cmd->v1.channel; 178 } 179 180 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) 181 { 182 if (mvm->scan_rx_ant != ANT_NONE) 183 return mvm->scan_rx_ant; 184 return iwl_mvm_get_valid_rx_ant(mvm); 185 } 186 187 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm) 188 { 189 u16 rx_chain; 190 u8 rx_ant; 191 192 rx_ant = iwl_mvm_scan_rx_ant(mvm); 193 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS; 194 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS; 195 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS; 196 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS; 197 return cpu_to_le16(rx_chain); 198 } 199 200 static inline __le32 201 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, 202 bool no_cck) 203 { 204 u32 tx_ant; 205 206 iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx); 207 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; 208 209 if (band == NL80211_BAND_2GHZ && !no_cck) 210 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK | 211 tx_ant); 212 else 213 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); 214 } 215 216 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, 217 struct ieee80211_vif *vif) 218 { 219 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 220 int *global_cnt = data; 221 222 if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && 223 mvmvif->phy_ctxt->id < NUM_PHY_CTX) 224 *global_cnt += 1; 225 } 226 227 static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) 228 { 229 return mvm->tcm.result.global_load; 230 } 231 232 static enum iwl_mvm_traffic_load 233 iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) 234 { 235 return mvm->tcm.result.band_load[band]; 236 } 237 238 struct iwl_is_dcm_with_go_iterator_data { 239 struct ieee80211_vif *current_vif; 240 bool is_dcm_with_p2p_go; 241 }; 242 243 static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac, 244 struct ieee80211_vif *vif) 245 { 246 struct iwl_is_dcm_with_go_iterator_data *data = _data; 247 struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); 248 struct iwl_mvm_vif *curr_mvmvif = 249 iwl_mvm_vif_from_mac80211(data->current_vif); 250 251 /* exclude the given vif */ 252 if (vif == data->current_vif) 253 return; 254 255 if (vif->type == NL80211_IFTYPE_AP && vif->p2p && 256 other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && 257 other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) 258 data->is_dcm_with_p2p_go = true; 259 } 260 261 static enum 262 iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, 263 struct ieee80211_vif *vif, 264 enum iwl_mvm_traffic_load load, 265 bool low_latency) 266 { 267 int global_cnt = 0; 268 269 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 270 IEEE80211_IFACE_ITER_NORMAL, 271 iwl_mvm_scan_condition_iterator, 272 &global_cnt); 273 if (!global_cnt) 274 return IWL_SCAN_TYPE_UNASSOC; 275 276 if (fw_has_api(&mvm->fw->ucode_capa, 277 IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { 278 if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && 279 (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE)) 280 return IWL_SCAN_TYPE_FRAGMENTED; 281 282 /* in case of DCM with GO where BSS DTIM interval < 220msec 283 * set all scan requests as fast-balance scan 284 * */ 285 if (vif && vif->type == NL80211_IFTYPE_STATION && 286 vif->bss_conf.dtim_period < 220) { 287 struct iwl_is_dcm_with_go_iterator_data data = { 288 .current_vif = vif, 289 .is_dcm_with_p2p_go = false, 290 }; 291 292 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 293 IEEE80211_IFACE_ITER_NORMAL, 294 iwl_mvm_is_dcm_with_go_iterator, 295 &data); 296 if (data.is_dcm_with_p2p_go) 297 return IWL_SCAN_TYPE_FAST_BALANCE; 298 } 299 } 300 301 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) 302 return IWL_SCAN_TYPE_MILD; 303 304 return IWL_SCAN_TYPE_WILD; 305 } 306 307 static enum 308 iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, 309 struct ieee80211_vif *vif) 310 { 311 enum iwl_mvm_traffic_load load; 312 bool low_latency; 313 314 load = iwl_mvm_get_traffic_load(mvm); 315 low_latency = iwl_mvm_low_latency(mvm); 316 317 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); 318 } 319 320 static enum 321 iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, 322 struct ieee80211_vif *vif, 323 enum nl80211_band band) 324 { 325 enum iwl_mvm_traffic_load load; 326 bool low_latency; 327 328 load = iwl_mvm_get_traffic_load_band(mvm, band); 329 low_latency = iwl_mvm_low_latency_band(mvm, band); 330 331 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency); 332 } 333 334 static int 335 iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm, 336 struct cfg80211_scan_request *req, 337 struct iwl_mvm_scan_params *params) 338 { 339 u32 duration = scan_timing[params->type].max_out_time; 340 341 if (!req->duration) 342 return 0; 343 344 if (iwl_mvm_is_cdb_supported(mvm)) { 345 u32 hb_time = scan_timing[params->hb_type].max_out_time; 346 347 duration = min_t(u32, duration, hb_time); 348 } 349 350 if (req->duration_mandatory && req->duration > duration) { 351 IWL_DEBUG_SCAN(mvm, 352 "Measurement scan - too long dwell %hu (max out time %u)\n", 353 req->duration, 354 duration); 355 return -EOPNOTSUPP; 356 } 357 358 return min_t(u32, (u32)req->duration, duration); 359 } 360 361 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) 362 { 363 /* require rrm scan whenever the fw supports it */ 364 return fw_has_capa(&mvm->fw->ucode_capa, 365 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); 366 } 367 368 static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) 369 { 370 int max_probe_len; 371 372 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE; 373 374 /* we create the 802.11 header and SSID element */ 375 max_probe_len -= 24 + 2; 376 377 /* DS parameter set element is added on 2.4GHZ band if required */ 378 if (iwl_mvm_rrm_scan_needed(mvm)) 379 max_probe_len -= 3; 380 381 return max_probe_len; 382 } 383 384 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) 385 { 386 int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm); 387 388 /* TODO: [BUG] This function should return the maximum allowed size of 389 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs 390 * in the same command. So the correct implementation of this function 391 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan 392 * command has only 512 bytes and it would leave us with about 240 393 * bytes for scan IEs, which is clearly not enough. So meanwhile 394 * we will report an incorrect value. This may result in a failure to 395 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac 396 * functions with -ENOBUFS, if a large enough probe will be provided. 397 */ 398 return max_ie_len; 399 } 400 401 void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, 402 struct iwl_rx_cmd_buffer *rxb) 403 { 404 struct iwl_rx_packet *pkt = rxb_addr(rxb); 405 struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; 406 407 IWL_DEBUG_SCAN(mvm, 408 "Scan offload iteration complete: status=0x%x scanned channels=%d\n", 409 notif->status, notif->scanned_channels); 410 411 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { 412 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); 413 ieee80211_sched_scan_results(mvm->hw); 414 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; 415 } 416 } 417 418 void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, 419 struct iwl_rx_cmd_buffer *rxb) 420 { 421 IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); 422 ieee80211_sched_scan_results(mvm->hw); 423 } 424 425 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) 426 { 427 switch (status) { 428 case IWL_SCAN_EBS_SUCCESS: 429 return "successful"; 430 case IWL_SCAN_EBS_INACTIVE: 431 return "inactive"; 432 case IWL_SCAN_EBS_FAILED: 433 case IWL_SCAN_EBS_CHAN_NOT_FOUND: 434 default: 435 return "failed"; 436 } 437 } 438 439 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, 440 struct iwl_rx_cmd_buffer *rxb) 441 { 442 struct iwl_rx_packet *pkt = rxb_addr(rxb); 443 struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; 444 bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); 445 446 /* If this happens, the firmware has mistakenly sent an LMAC 447 * notification during UMAC scans -- warn and ignore it. 448 */ 449 if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa, 450 IWL_UCODE_TLV_CAPA_UMAC_SCAN))) 451 return; 452 453 /* scan status must be locked for proper checking */ 454 lockdep_assert_held(&mvm->mutex); 455 456 /* We first check if we were stopping a scan, in which case we 457 * just clear the stopping flag. Then we check if it was a 458 * firmware initiated stop, in which case we need to inform 459 * mac80211. 460 * Note that we can have a stopping and a running scan 461 * simultaneously, but we can't have two different types of 462 * scans stopping or running at the same time (since LMAC 463 * doesn't support it). 464 */ 465 466 if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) { 467 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR); 468 469 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", 470 aborted ? "aborted" : "completed", 471 iwl_mvm_ebs_status_str(scan_notif->ebs_status)); 472 IWL_DEBUG_SCAN(mvm, 473 "Last line %d, Last iteration %d, Time after last iteration %d\n", 474 scan_notif->last_schedule_line, 475 scan_notif->last_schedule_iteration, 476 __le32_to_cpu(scan_notif->time_after_last_iter)); 477 478 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; 479 } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { 480 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", 481 aborted ? "aborted" : "completed", 482 iwl_mvm_ebs_status_str(scan_notif->ebs_status)); 483 484 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; 485 } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { 486 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); 487 488 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", 489 aborted ? "aborted" : "completed", 490 iwl_mvm_ebs_status_str(scan_notif->ebs_status)); 491 IWL_DEBUG_SCAN(mvm, 492 "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", 493 scan_notif->last_schedule_line, 494 scan_notif->last_schedule_iteration, 495 __le32_to_cpu(scan_notif->time_after_last_iter)); 496 497 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; 498 ieee80211_sched_scan_stopped(mvm->hw); 499 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 500 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { 501 struct cfg80211_scan_info info = { 502 .aborted = aborted, 503 }; 504 505 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", 506 aborted ? "aborted" : "completed", 507 iwl_mvm_ebs_status_str(scan_notif->ebs_status)); 508 509 mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; 510 ieee80211_scan_completed(mvm->hw, &info); 511 cancel_delayed_work(&mvm->scan_timeout_dwork); 512 iwl_mvm_resume_tcm(mvm); 513 } else { 514 IWL_ERR(mvm, 515 "got scan complete notification but no scan is running\n"); 516 } 517 518 mvm->last_ebs_successful = 519 scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || 520 scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; 521 } 522 523 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) 524 { 525 int i; 526 527 for (i = 0; i < PROBE_OPTION_MAX; i++) { 528 if (!ssid_list[i].len) 529 break; 530 if (ssid_list[i].len == ssid_len && 531 !memcmp(ssid_list->ssid, ssid, ssid_len)) 532 return i; 533 } 534 return -1; 535 } 536 537 /* We insert the SSIDs in an inverted order, because the FW will 538 * invert it back. 539 */ 540 static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, 541 struct iwl_ssid_ie *ssids, 542 u32 *ssid_bitmap) 543 { 544 int i, j; 545 int index; 546 u32 tmp_bitmap = 0; 547 548 /* 549 * copy SSIDs from match list. 550 * iwl_config_sched_scan_profiles() uses the order of these ssids to 551 * config match list. 552 */ 553 for (i = 0, j = params->n_match_sets - 1; 554 j >= 0 && i < PROBE_OPTION_MAX; 555 i++, j--) { 556 /* skip empty SSID matchsets */ 557 if (!params->match_sets[j].ssid.ssid_len) 558 continue; 559 ssids[i].id = WLAN_EID_SSID; 560 ssids[i].len = params->match_sets[j].ssid.ssid_len; 561 memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid, 562 ssids[i].len); 563 } 564 565 /* add SSIDs from scan SSID list */ 566 for (j = params->n_ssids - 1; 567 j >= 0 && i < PROBE_OPTION_MAX; 568 i++, j--) { 569 index = iwl_ssid_exist(params->ssids[j].ssid, 570 params->ssids[j].ssid_len, 571 ssids); 572 if (index < 0) { 573 ssids[i].id = WLAN_EID_SSID; 574 ssids[i].len = params->ssids[j].ssid_len; 575 memcpy(ssids[i].ssid, params->ssids[j].ssid, 576 ssids[i].len); 577 tmp_bitmap |= BIT(i); 578 } else { 579 tmp_bitmap |= BIT(index); 580 } 581 } 582 if (ssid_bitmap) 583 *ssid_bitmap = tmp_bitmap; 584 } 585 586 static int 587 iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, 588 struct cfg80211_sched_scan_request *req) 589 { 590 struct iwl_scan_offload_profile *profile; 591 struct iwl_scan_offload_profile_cfg *profile_cfg; 592 struct iwl_scan_offload_blacklist *blacklist; 593 struct iwl_host_cmd cmd = { 594 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, 595 .len[1] = sizeof(*profile_cfg), 596 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 597 .dataflags[1] = IWL_HCMD_DFL_NOCOPY, 598 }; 599 int blacklist_len; 600 int i; 601 int ret; 602 603 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES)) 604 return -EIO; 605 606 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL) 607 blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; 608 else 609 blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; 610 611 blacklist = kcalloc(blacklist_len, sizeof(*blacklist), GFP_KERNEL); 612 if (!blacklist) 613 return -ENOMEM; 614 615 profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL); 616 if (!profile_cfg) { 617 ret = -ENOMEM; 618 goto free_blacklist; 619 } 620 621 cmd.data[0] = blacklist; 622 cmd.len[0] = sizeof(*blacklist) * blacklist_len; 623 cmd.data[1] = profile_cfg; 624 625 /* No blacklist configuration */ 626 627 profile_cfg->num_profiles = req->n_match_sets; 628 profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN; 629 profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN; 630 profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN; 631 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) 632 profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN; 633 634 for (i = 0; i < req->n_match_sets; i++) { 635 profile = &profile_cfg->profiles[i]; 636 profile->ssid_index = i; 637 /* Support any cipher and auth algorithm */ 638 profile->unicast_cipher = 0xff; 639 profile->auth_alg = 0xff; 640 profile->network_type = IWL_NETWORK_TYPE_ANY; 641 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; 642 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; 643 } 644 645 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n"); 646 647 ret = iwl_mvm_send_cmd(mvm, &cmd); 648 kfree(profile_cfg); 649 free_blacklist: 650 kfree(blacklist); 651 652 return ret; 653 } 654 655 static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, 656 struct cfg80211_sched_scan_request *req) 657 { 658 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { 659 IWL_DEBUG_SCAN(mvm, 660 "Sending scheduled scan with filtering, n_match_sets %d\n", 661 req->n_match_sets); 662 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 663 return false; 664 } 665 666 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); 667 668 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; 669 return true; 670 } 671 672 static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) 673 { 674 int ret; 675 struct iwl_host_cmd cmd = { 676 .id = SCAN_OFFLOAD_ABORT_CMD, 677 }; 678 u32 status = CAN_ABORT_STATUS; 679 680 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); 681 if (ret) 682 return ret; 683 684 if (status != CAN_ABORT_STATUS) { 685 /* 686 * The scan abort will return 1 for success or 687 * 2 for "failure". A failure condition can be 688 * due to simply not being in an active scan which 689 * can occur if we send the scan abort before the 690 * microcode has notified us that a scan is completed. 691 */ 692 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status); 693 ret = -ENOENT; 694 } 695 696 return ret; 697 } 698 699 static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, 700 struct iwl_scan_req_tx_cmd *tx_cmd, 701 bool no_cck) 702 { 703 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | 704 TX_CMD_FLG_BT_DIS); 705 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, 706 NL80211_BAND_2GHZ, 707 no_cck); 708 tx_cmd[0].sta_id = mvm->aux_sta.sta_id; 709 710 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | 711 TX_CMD_FLG_BT_DIS); 712 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, 713 NL80211_BAND_5GHZ, 714 no_cck); 715 tx_cmd[1].sta_id = mvm->aux_sta.sta_id; 716 } 717 718 static void 719 iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm, 720 struct ieee80211_channel **channels, 721 int n_channels, u32 ssid_bitmap, 722 struct iwl_scan_req_lmac *cmd) 723 { 724 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data; 725 int i; 726 727 for (i = 0; i < n_channels; i++) { 728 channel_cfg[i].channel_num = 729 cpu_to_le16(channels[i]->hw_value); 730 channel_cfg[i].iter_count = cpu_to_le16(1); 731 channel_cfg[i].iter_interval = 0; 732 channel_cfg[i].flags = 733 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL | 734 ssid_bitmap); 735 } 736 } 737 738 static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies, 739 size_t len, u8 *const pos) 740 { 741 static const u8 before_ds_params[] = { 742 WLAN_EID_SSID, 743 WLAN_EID_SUPP_RATES, 744 WLAN_EID_REQUEST, 745 WLAN_EID_EXT_SUPP_RATES, 746 }; 747 size_t offs; 748 u8 *newpos = pos; 749 750 if (!iwl_mvm_rrm_scan_needed(mvm)) { 751 memcpy(newpos, ies, len); 752 return newpos + len; 753 } 754 755 offs = ieee80211_ie_split(ies, len, 756 before_ds_params, 757 ARRAY_SIZE(before_ds_params), 758 0); 759 760 memcpy(newpos, ies, offs); 761 newpos += offs; 762 763 /* Add a placeholder for DS Parameter Set element */ 764 *newpos++ = WLAN_EID_DS_PARAMS; 765 *newpos++ = 1; 766 *newpos++ = 0; 767 768 memcpy(newpos, ies + offs, len - offs); 769 newpos += len - offs; 770 771 return newpos; 772 } 773 774 #define WFA_TPC_IE_LEN 9 775 776 static void iwl_mvm_add_tpc_report_ie(u8 *pos) 777 { 778 pos[0] = WLAN_EID_VENDOR_SPECIFIC; 779 pos[1] = WFA_TPC_IE_LEN - 2; 780 pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff; 781 pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff; 782 pos[4] = WLAN_OUI_MICROSOFT & 0xff; 783 pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC; 784 pos[6] = 0; 785 /* pos[7] - tx power will be inserted by the FW */ 786 pos[7] = 0; 787 pos[8] = 0; 788 } 789 790 static void 791 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 792 struct ieee80211_scan_ies *ies, 793 struct iwl_mvm_scan_params *params) 794 { 795 struct ieee80211_mgmt *frame = (void *)params->preq.buf; 796 u8 *pos, *newpos; 797 const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? 798 params->mac_addr : NULL; 799 800 /* 801 * Unfortunately, right now the offload scan doesn't support randomising 802 * within the firmware, so until the firmware API is ready we implement 803 * it in the driver. This means that the scan iterations won't really be 804 * random, only when it's restarted, but at least that helps a bit. 805 */ 806 if (mac_addr) 807 get_random_mask_addr(frame->sa, mac_addr, 808 params->mac_addr_mask); 809 else 810 memcpy(frame->sa, vif->addr, ETH_ALEN); 811 812 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 813 eth_broadcast_addr(frame->da); 814 eth_broadcast_addr(frame->bssid); 815 frame->seq_ctrl = 0; 816 817 pos = frame->u.probe_req.variable; 818 *pos++ = WLAN_EID_SSID; 819 *pos++ = 0; 820 821 params->preq.mac_header.offset = 0; 822 params->preq.mac_header.len = cpu_to_le16(24 + 2); 823 824 /* Insert ds parameter set element on 2.4 GHz band */ 825 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm, 826 ies->ies[NL80211_BAND_2GHZ], 827 ies->len[NL80211_BAND_2GHZ], 828 pos); 829 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); 830 params->preq.band_data[0].len = cpu_to_le16(newpos - pos); 831 pos = newpos; 832 833 memcpy(pos, ies->ies[NL80211_BAND_5GHZ], 834 ies->len[NL80211_BAND_5GHZ]); 835 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); 836 params->preq.band_data[1].len = 837 cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); 838 pos += ies->len[NL80211_BAND_5GHZ]; 839 840 memcpy(pos, ies->common_ies, ies->common_ie_len); 841 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); 842 843 if (iwl_mvm_rrm_scan_needed(mvm) && 844 !fw_has_capa(&mvm->fw->ucode_capa, 845 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) { 846 iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len); 847 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len + 848 WFA_TPC_IE_LEN); 849 } else { 850 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); 851 } 852 } 853 854 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, 855 struct iwl_scan_req_lmac *cmd, 856 struct iwl_mvm_scan_params *params) 857 { 858 cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; 859 cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; 860 cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; 861 cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; 862 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); 863 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); 864 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); 865 } 866 867 static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, 868 struct ieee80211_scan_ies *ies, 869 int n_channels) 870 { 871 return ((n_ssids <= PROBE_OPTION_MAX) && 872 (n_channels <= mvm->fw->ucode_capa.n_scan_channels) & 873 (ies->common_ie_len + 874 ies->len[NL80211_BAND_2GHZ] + 875 ies->len[NL80211_BAND_5GHZ] <= 876 iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); 877 } 878 879 static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, 880 struct ieee80211_vif *vif) 881 { 882 const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa; 883 bool low_latency; 884 885 if (iwl_mvm_is_cdb_supported(mvm)) 886 low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ); 887 else 888 low_latency = iwl_mvm_low_latency(mvm); 889 890 /* We can only use EBS if: 891 * 1. the feature is supported; 892 * 2. the last EBS was successful; 893 * 3. if only single scan, the single scan EBS API is supported; 894 * 4. it's not a p2p find operation. 895 * 5. we are not in low latency mode, 896 * or if fragmented ebs is supported by the FW 897 */ 898 return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && 899 mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && 900 vif->type != NL80211_IFTYPE_P2P_DEVICE && 901 (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm))); 902 } 903 904 static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params) 905 { 906 return params->n_scan_plans == 1 && 907 params->scan_plans[0].iterations == 1; 908 } 909 910 static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type) 911 { 912 return (type == IWL_SCAN_TYPE_FRAGMENTED || 913 type == IWL_SCAN_TYPE_FAST_BALANCE); 914 } 915 916 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, 917 struct iwl_mvm_scan_params *params, 918 struct ieee80211_vif *vif) 919 { 920 int flags = 0; 921 922 if (params->n_ssids == 0) 923 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE; 924 925 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) 926 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; 927 928 if (iwl_mvm_is_scan_fragmented(params->type)) 929 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; 930 931 if (iwl_mvm_rrm_scan_needed(mvm) && 932 fw_has_capa(&mvm->fw->ucode_capa, 933 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 934 flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED; 935 936 if (params->pass_all) 937 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; 938 else 939 flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; 940 941 #ifdef CONFIG_IWLWIFI_DEBUGFS 942 if (mvm->scan_iter_notif_enabled) 943 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; 944 #endif 945 946 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) 947 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; 948 949 if (iwl_mvm_is_regular_scan(params) && 950 vif->type != NL80211_IFTYPE_P2P_DEVICE && 951 !iwl_mvm_is_scan_fragmented(params->type)) 952 flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL; 953 954 return flags; 955 } 956 957 static void 958 iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req, 959 struct iwl_scan_probe_req *src_p_req) 960 { 961 int i; 962 963 p_req->mac_header = src_p_req->mac_header; 964 for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++) 965 p_req->band_data[i] = src_p_req->band_data[i]; 966 p_req->common_data = src_p_req->common_data; 967 memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf)); 968 } 969 970 static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 971 struct iwl_mvm_scan_params *params) 972 { 973 struct iwl_scan_req_lmac *cmd = mvm->scan_cmd; 974 struct iwl_scan_probe_req_v1 *preq = 975 (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) * 976 mvm->fw->ucode_capa.n_scan_channels); 977 u32 ssid_bitmap = 0; 978 int i; 979 u8 band; 980 981 if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) 982 return -EINVAL; 983 984 iwl_mvm_scan_lmac_dwell(mvm, cmd, params); 985 986 cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); 987 cmd->iter_num = cpu_to_le32(1); 988 cmd->n_channels = (u8)params->n_channels; 989 990 cmd->delay = cpu_to_le32(params->delay); 991 992 cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params, 993 vif)); 994 995 band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band); 996 cmd->flags = cpu_to_le32(band); 997 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP | 998 MAC_FILTER_IN_BEACON); 999 iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck); 1000 iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap); 1001 1002 /* this API uses bits 1-20 instead of 0-19 */ 1003 ssid_bitmap <<= 1; 1004 1005 for (i = 0; i < params->n_scan_plans; i++) { 1006 struct cfg80211_sched_scan_plan *scan_plan = 1007 ¶ms->scan_plans[i]; 1008 1009 cmd->schedule[i].delay = 1010 cpu_to_le16(scan_plan->interval); 1011 cmd->schedule[i].iterations = scan_plan->iterations; 1012 cmd->schedule[i].full_scan_mul = 1; 1013 } 1014 1015 /* 1016 * If the number of iterations of the last scan plan is set to 1017 * zero, it should run infinitely. However, this is not always the case. 1018 * For example, when regular scan is requested the driver sets one scan 1019 * plan with one iteration. 1020 */ 1021 if (!cmd->schedule[i - 1].iterations) 1022 cmd->schedule[i - 1].iterations = 0xff; 1023 1024 if (iwl_mvm_scan_use_ebs(mvm, vif)) { 1025 cmd->channel_opt[0].flags = 1026 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | 1027 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1028 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); 1029 cmd->channel_opt[0].non_ebs_ratio = 1030 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO); 1031 cmd->channel_opt[1].flags = 1032 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | 1033 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1034 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD); 1035 cmd->channel_opt[1].non_ebs_ratio = 1036 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO); 1037 } 1038 1039 iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels, 1040 params->n_channels, ssid_bitmap, cmd); 1041 1042 iwl_mvm_scan_set_legacy_probe_req(preq, ¶ms->preq); 1043 1044 return 0; 1045 } 1046 1047 static int rate_to_scan_rate_flag(unsigned int rate) 1048 { 1049 static const int rate_to_scan_rate[IWL_RATE_COUNT] = { 1050 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M, 1051 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M, 1052 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M, 1053 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M, 1054 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M, 1055 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M, 1056 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M, 1057 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M, 1058 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M, 1059 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M, 1060 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M, 1061 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M, 1062 }; 1063 1064 return rate_to_scan_rate[rate]; 1065 } 1066 1067 static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm) 1068 { 1069 struct ieee80211_supported_band *band; 1070 unsigned int rates = 0; 1071 int i; 1072 1073 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 1074 for (i = 0; i < band->n_bitrates; i++) 1075 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); 1076 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 1077 for (i = 0; i < band->n_bitrates; i++) 1078 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value); 1079 1080 /* Set both basic rates and supported rates */ 1081 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates); 1082 1083 return cpu_to_le32(rates); 1084 } 1085 1086 static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm, 1087 struct iwl_scan_dwell *dwell) 1088 { 1089 dwell->active = IWL_SCAN_DWELL_ACTIVE; 1090 dwell->passive = IWL_SCAN_DWELL_PASSIVE; 1091 dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED; 1092 dwell->extended = IWL_SCAN_DWELL_EXTENDED; 1093 } 1094 1095 static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels, 1096 u32 max_channels) 1097 { 1098 struct ieee80211_supported_band *band; 1099 int i, j = 0; 1100 1101 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 1102 for (i = 0; i < band->n_channels && j < max_channels; i++, j++) 1103 channels[j] = band->channels[i].hw_value; 1104 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 1105 for (i = 0; i < band->n_channels && j < max_channels; i++, j++) 1106 channels[j] = band->channels[i].hw_value; 1107 } 1108 1109 static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, 1110 u32 flags, u8 channel_flags, 1111 u32 max_channels) 1112 { 1113 enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL); 1114 struct iwl_scan_config_v1 *cfg = config; 1115 1116 cfg->flags = cpu_to_le32(flags); 1117 cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); 1118 cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); 1119 cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); 1120 cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time); 1121 cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time); 1122 1123 iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); 1124 1125 memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); 1126 1127 cfg->bcast_sta_id = mvm->aux_sta.sta_id; 1128 cfg->channel_flags = channel_flags; 1129 1130 iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); 1131 } 1132 1133 static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config, 1134 u32 flags, u8 channel_flags, 1135 u32 max_channels) 1136 { 1137 struct iwl_scan_config_v2 *cfg = config; 1138 1139 cfg->flags = cpu_to_le32(flags); 1140 cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); 1141 cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); 1142 cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); 1143 1144 if (iwl_mvm_is_cdb_supported(mvm)) { 1145 enum iwl_mvm_scan_type lb_type, hb_type; 1146 1147 lb_type = iwl_mvm_get_scan_type_band(mvm, NULL, 1148 NL80211_BAND_2GHZ); 1149 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, 1150 NL80211_BAND_5GHZ); 1151 1152 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = 1153 cpu_to_le32(scan_timing[lb_type].max_out_time); 1154 cfg->suspend_time[SCAN_LB_LMAC_IDX] = 1155 cpu_to_le32(scan_timing[lb_type].suspend_time); 1156 1157 cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] = 1158 cpu_to_le32(scan_timing[hb_type].max_out_time); 1159 cfg->suspend_time[SCAN_HB_LMAC_IDX] = 1160 cpu_to_le32(scan_timing[hb_type].suspend_time); 1161 } else { 1162 enum iwl_mvm_scan_type type = 1163 iwl_mvm_get_scan_type(mvm, NULL); 1164 1165 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = 1166 cpu_to_le32(scan_timing[type].max_out_time); 1167 cfg->suspend_time[SCAN_LB_LMAC_IDX] = 1168 cpu_to_le32(scan_timing[type].suspend_time); 1169 } 1170 1171 iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); 1172 1173 memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); 1174 1175 cfg->bcast_sta_id = mvm->aux_sta.sta_id; 1176 cfg->channel_flags = channel_flags; 1177 1178 iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels); 1179 } 1180 1181 static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) 1182 { 1183 void *cfg; 1184 int ret, cmd_size; 1185 struct iwl_host_cmd cmd = { 1186 .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), 1187 }; 1188 enum iwl_mvm_scan_type type; 1189 enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET; 1190 int num_channels = 1191 mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + 1192 mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; 1193 u32 flags; 1194 u8 channel_flags; 1195 1196 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) 1197 num_channels = mvm->fw->ucode_capa.n_scan_channels; 1198 1199 if (iwl_mvm_is_cdb_supported(mvm)) { 1200 type = iwl_mvm_get_scan_type_band(mvm, NULL, 1201 NL80211_BAND_2GHZ); 1202 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL, 1203 NL80211_BAND_5GHZ); 1204 if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) 1205 return 0; 1206 } else { 1207 type = iwl_mvm_get_scan_type(mvm, NULL); 1208 if (type == mvm->scan_type) 1209 return 0; 1210 } 1211 1212 if (iwl_mvm_cdb_scan_api(mvm)) 1213 cmd_size = sizeof(struct iwl_scan_config_v2); 1214 else 1215 cmd_size = sizeof(struct iwl_scan_config_v1); 1216 cmd_size += num_channels; 1217 1218 cfg = kzalloc(cmd_size, GFP_KERNEL); 1219 if (!cfg) 1220 return -ENOMEM; 1221 1222 flags = SCAN_CONFIG_FLAG_ACTIVATE | 1223 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | 1224 SCAN_CONFIG_FLAG_SET_TX_CHAINS | 1225 SCAN_CONFIG_FLAG_SET_RX_CHAINS | 1226 SCAN_CONFIG_FLAG_SET_AUX_STA_ID | 1227 SCAN_CONFIG_FLAG_SET_ALL_TIMES | 1228 SCAN_CONFIG_FLAG_SET_LEGACY_RATES | 1229 SCAN_CONFIG_FLAG_SET_MAC_ADDR | 1230 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS | 1231 SCAN_CONFIG_N_CHANNELS(num_channels) | 1232 (iwl_mvm_is_scan_fragmented(type) ? 1233 SCAN_CONFIG_FLAG_SET_FRAGMENTED : 1234 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED); 1235 1236 channel_flags = IWL_CHANNEL_FLAG_EBS | 1237 IWL_CHANNEL_FLAG_ACCURATE_EBS | 1238 IWL_CHANNEL_FLAG_EBS_ADD | 1239 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; 1240 1241 /* 1242 * Check for fragmented scan on LMAC2 - high band. 1243 * LMAC1 - low band is checked above. 1244 */ 1245 if (iwl_mvm_cdb_scan_api(mvm)) { 1246 if (iwl_mvm_is_cdb_supported(mvm)) 1247 flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ? 1248 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : 1249 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; 1250 iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags, 1251 num_channels); 1252 } else { 1253 iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags, 1254 num_channels); 1255 } 1256 1257 cmd.data[0] = cfg; 1258 cmd.len[0] = cmd_size; 1259 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; 1260 1261 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); 1262 1263 ret = iwl_mvm_send_cmd(mvm, &cmd); 1264 if (!ret) { 1265 mvm->scan_type = type; 1266 mvm->hb_scan_type = hb_type; 1267 } 1268 1269 kfree(cfg); 1270 return ret; 1271 } 1272 1273 int iwl_mvm_config_scan(struct iwl_mvm *mvm) 1274 { 1275 struct iwl_scan_config cfg; 1276 struct iwl_host_cmd cmd = { 1277 .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), 1278 .len[0] = sizeof(cfg), 1279 .data[0] = &cfg, 1280 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1281 }; 1282 1283 if (!iwl_mvm_is_reduced_config_scan_supported(mvm)) 1284 return iwl_mvm_legacy_config_scan(mvm); 1285 1286 memset(&cfg, 0, sizeof(cfg)); 1287 1288 cfg.bcast_sta_id = mvm->aux_sta.sta_id; 1289 cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); 1290 cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); 1291 1292 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); 1293 1294 return iwl_mvm_send_cmd(mvm, &cmd); 1295 } 1296 1297 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) 1298 { 1299 int i; 1300 1301 for (i = 0; i < mvm->max_scans; i++) 1302 if (mvm->scan_uid_status[i] == status) 1303 return i; 1304 1305 return -ENOENT; 1306 } 1307 1308 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, 1309 struct iwl_scan_req_umac *cmd, 1310 struct iwl_mvm_scan_params *params) 1311 { 1312 struct iwl_mvm_scan_timing_params *timing, *hb_timing; 1313 u8 active_dwell, passive_dwell; 1314 1315 timing = &scan_timing[params->type]; 1316 active_dwell = params->measurement_dwell ? 1317 params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE; 1318 passive_dwell = params->measurement_dwell ? 1319 params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE; 1320 1321 if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { 1322 cmd->v7.adwell_default_n_aps_social = 1323 IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; 1324 cmd->v7.adwell_default_n_aps = 1325 IWL_SCAN_ADWELL_DEFAULT_LB_N_APS; 1326 1327 if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm)) 1328 cmd->v9.adwell_default_hb_n_aps = 1329 IWL_SCAN_ADWELL_DEFAULT_HB_N_APS; 1330 1331 /* if custom max budget was configured with debugfs */ 1332 if (IWL_MVM_ADWELL_MAX_BUDGET) 1333 cmd->v7.adwell_max_budget = 1334 cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); 1335 else if (params->ssids && params->ssids[0].ssid_len) 1336 cmd->v7.adwell_max_budget = 1337 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); 1338 else 1339 cmd->v7.adwell_max_budget = 1340 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); 1341 1342 cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); 1343 cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = 1344 cpu_to_le32(timing->max_out_time); 1345 cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = 1346 cpu_to_le32(timing->suspend_time); 1347 1348 if (iwl_mvm_is_cdb_supported(mvm)) { 1349 hb_timing = &scan_timing[params->hb_type]; 1350 1351 cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = 1352 cpu_to_le32(hb_timing->max_out_time); 1353 cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = 1354 cpu_to_le32(hb_timing->suspend_time); 1355 } 1356 1357 if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { 1358 cmd->v7.active_dwell = active_dwell; 1359 cmd->v7.passive_dwell = passive_dwell; 1360 cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; 1361 } else { 1362 cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell; 1363 cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell; 1364 if (iwl_mvm_is_cdb_supported(mvm)) { 1365 cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] = 1366 active_dwell; 1367 cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] = 1368 passive_dwell; 1369 } 1370 } 1371 } else { 1372 cmd->v1.extended_dwell = params->measurement_dwell ? 1373 params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED; 1374 cmd->v1.active_dwell = active_dwell; 1375 cmd->v1.passive_dwell = passive_dwell; 1376 cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; 1377 1378 if (iwl_mvm_is_cdb_supported(mvm)) { 1379 hb_timing = &scan_timing[params->hb_type]; 1380 1381 cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = 1382 cpu_to_le32(hb_timing->max_out_time); 1383 cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = 1384 cpu_to_le32(hb_timing->suspend_time); 1385 } 1386 1387 if (iwl_mvm_cdb_scan_api(mvm)) { 1388 cmd->v6.scan_priority = 1389 cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); 1390 cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = 1391 cpu_to_le32(timing->max_out_time); 1392 cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = 1393 cpu_to_le32(timing->suspend_time); 1394 } else { 1395 cmd->v1.scan_priority = 1396 cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); 1397 cmd->v1.max_out_time = 1398 cpu_to_le32(timing->max_out_time); 1399 cmd->v1.suspend_time = 1400 cpu_to_le32(timing->suspend_time); 1401 } 1402 } 1403 1404 if (iwl_mvm_is_regular_scan(params)) 1405 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); 1406 else 1407 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); 1408 } 1409 1410 static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params) 1411 { 1412 return iwl_mvm_is_regular_scan(params) ? 1413 IWL_SCAN_PRIORITY_EXT_6 : 1414 IWL_SCAN_PRIORITY_EXT_2; 1415 } 1416 1417 static void 1418 iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm, 1419 struct iwl_scan_general_params_v10 *general_params, 1420 struct iwl_mvm_scan_params *params) 1421 { 1422 struct iwl_mvm_scan_timing_params *timing, *hb_timing; 1423 u8 active_dwell, passive_dwell; 1424 1425 timing = &scan_timing[params->type]; 1426 active_dwell = params->measurement_dwell ? 1427 params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE; 1428 passive_dwell = params->measurement_dwell ? 1429 params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE; 1430 1431 general_params->adwell_default_social_chn = 1432 IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; 1433 general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS; 1434 general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS; 1435 1436 /* if custom max budget was configured with debugfs */ 1437 if (IWL_MVM_ADWELL_MAX_BUDGET) 1438 general_params->adwell_max_budget = 1439 cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); 1440 else if (params->ssids && params->ssids[0].ssid_len) 1441 general_params->adwell_max_budget = 1442 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); 1443 else 1444 general_params->adwell_max_budget = 1445 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); 1446 1447 general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); 1448 general_params->max_out_of_time[SCAN_LB_LMAC_IDX] = 1449 cpu_to_le32(timing->max_out_time); 1450 general_params->suspend_time[SCAN_LB_LMAC_IDX] = 1451 cpu_to_le32(timing->suspend_time); 1452 1453 hb_timing = &scan_timing[params->hb_type]; 1454 1455 general_params->max_out_of_time[SCAN_HB_LMAC_IDX] = 1456 cpu_to_le32(hb_timing->max_out_time); 1457 general_params->suspend_time[SCAN_HB_LMAC_IDX] = 1458 cpu_to_le32(hb_timing->suspend_time); 1459 1460 general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell; 1461 general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell; 1462 general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell; 1463 general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell; 1464 } 1465 1466 struct iwl_mvm_scan_channel_segment { 1467 u8 start_idx; 1468 u8 end_idx; 1469 u8 first_channel_id; 1470 u8 last_channel_id; 1471 u8 channel_spacing_shift; 1472 u8 band; 1473 }; 1474 1475 static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = { 1476 { 1477 .start_idx = 0, 1478 .end_idx = 13, 1479 .first_channel_id = 1, 1480 .last_channel_id = 14, 1481 .channel_spacing_shift = 0, 1482 .band = PHY_BAND_24 1483 }, 1484 { 1485 .start_idx = 14, 1486 .end_idx = 41, 1487 .first_channel_id = 36, 1488 .last_channel_id = 144, 1489 .channel_spacing_shift = 2, 1490 .band = PHY_BAND_5 1491 }, 1492 { 1493 .start_idx = 42, 1494 .end_idx = 50, 1495 .first_channel_id = 149, 1496 .last_channel_id = 181, 1497 .channel_spacing_shift = 2, 1498 .band = PHY_BAND_5 1499 }, 1500 }; 1501 1502 static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band) 1503 { 1504 int i, index; 1505 1506 if (!channel_id) 1507 return -EINVAL; 1508 1509 for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) { 1510 const struct iwl_mvm_scan_channel_segment *ch_segment = 1511 &scan_channel_segments[i]; 1512 u32 ch_offset; 1513 1514 if (ch_segment->band != band || 1515 ch_segment->first_channel_id > channel_id || 1516 ch_segment->last_channel_id < channel_id) 1517 continue; 1518 1519 ch_offset = (channel_id - ch_segment->first_channel_id) >> 1520 ch_segment->channel_spacing_shift; 1521 1522 index = scan_channel_segments[i].start_idx + ch_offset; 1523 if (index < IWL_SCAN_NUM_CHANNELS) 1524 return index; 1525 1526 break; 1527 } 1528 1529 return -EINVAL; 1530 } 1531 1532 static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type, 1533 u8 ch_id, u8 band, u8 *ch_bitmap, 1534 size_t bitmap_n_entries) 1535 { 1536 int i; 1537 static const u8 p2p_go_friendly_chs[] = { 1538 36, 40, 44, 48, 149, 153, 157, 161, 165, 1539 }; 1540 1541 if (vif_type != NL80211_IFTYPE_P2P_DEVICE) 1542 return; 1543 1544 for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) { 1545 if (p2p_go_friendly_chs[i] == ch_id) { 1546 int ch_idx, bitmap_idx; 1547 1548 ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band); 1549 if (ch_idx < 0) 1550 return; 1551 1552 bitmap_idx = ch_idx / 8; 1553 if (bitmap_idx >= bitmap_n_entries) 1554 return; 1555 1556 ch_idx = ch_idx % 8; 1557 ch_bitmap[bitmap_idx] |= BIT(ch_idx); 1558 1559 return; 1560 } 1561 } 1562 } 1563 1564 static void 1565 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm, 1566 struct ieee80211_channel **channels, 1567 int n_channels, u32 flags, 1568 struct iwl_scan_channel_cfg_umac *channel_cfg) 1569 { 1570 int i; 1571 1572 for (i = 0; i < n_channels; i++) { 1573 channel_cfg[i].flags = cpu_to_le32(flags); 1574 channel_cfg[i].v1.channel_num = channels[i]->hw_value; 1575 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { 1576 enum nl80211_band band = channels[i]->band; 1577 1578 channel_cfg[i].v2.band = 1579 iwl_mvm_phy_band_from_nl80211(band); 1580 channel_cfg[i].v2.iter_count = 1; 1581 channel_cfg[i].v2.iter_interval = 0; 1582 } else { 1583 channel_cfg[i].v1.iter_count = 1; 1584 channel_cfg[i].v1.iter_interval = 0; 1585 } 1586 } 1587 } 1588 1589 static void 1590 iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm, 1591 struct ieee80211_channel **channels, 1592 struct iwl_scan_channel_params_v4 *cp, 1593 int n_channels, u32 flags, 1594 enum nl80211_iftype vif_type) 1595 { 1596 u8 *bitmap = cp->adwell_ch_override_bitmap; 1597 size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap); 1598 int i; 1599 1600 for (i = 0; i < n_channels; i++) { 1601 enum nl80211_band band = channels[i]->band; 1602 struct iwl_scan_channel_cfg_umac *cfg = 1603 &cp->channel_config[i]; 1604 1605 cfg->flags = cpu_to_le32(flags); 1606 cfg->v2.channel_num = channels[i]->hw_value; 1607 cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); 1608 cfg->v2.iter_count = 1; 1609 cfg->v2.iter_interval = 0; 1610 1611 iwl_mvm_scan_ch_add_n_aps_override(vif_type, 1612 cfg->v2.channel_num, 1613 cfg->v2.band, bitmap, 1614 bitmap_n_entries); 1615 } 1616 } 1617 1618 static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, 1619 struct iwl_mvm_scan_params *params, 1620 struct ieee80211_vif *vif) 1621 { 1622 u8 flags = 0; 1623 1624 flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER; 1625 1626 if (iwl_mvm_scan_use_ebs(mvm, vif)) 1627 flags |= IWL_SCAN_CHANNEL_FLAG_EBS | 1628 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1629 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; 1630 1631 /* set fragmented ebs for fragmented scan on HB channels */ 1632 if (iwl_mvm_is_scan_fragmented(params->hb_type)) 1633 flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; 1634 1635 return flags; 1636 } 1637 1638 static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, 1639 struct iwl_mvm_scan_params *params, 1640 struct ieee80211_vif *vif, 1641 int type) 1642 { 1643 u16 flags = 0; 1644 1645 if (params->n_ssids == 0) 1646 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE; 1647 1648 if (iwl_mvm_is_scan_fragmented(params->type)) 1649 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1; 1650 1651 if (iwl_mvm_is_scan_fragmented(params->hb_type)) 1652 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2; 1653 1654 if (params->pass_all) 1655 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL; 1656 else 1657 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH; 1658 1659 if (!iwl_mvm_is_regular_scan(params)) 1660 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC; 1661 1662 if (params->measurement_dwell || 1663 mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) 1664 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE; 1665 1666 if (IWL_MVM_ADWELL_ENABLE) 1667 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL; 1668 1669 if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) 1670 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE; 1671 1672 return flags; 1673 } 1674 1675 static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, 1676 struct iwl_mvm_scan_params *params, 1677 struct ieee80211_vif *vif) 1678 { 1679 u16 flags = 0; 1680 1681 if (params->n_ssids == 0) 1682 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE; 1683 1684 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) 1685 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; 1686 1687 if (iwl_mvm_is_scan_fragmented(params->type)) 1688 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; 1689 1690 if (iwl_mvm_is_cdb_supported(mvm) && 1691 iwl_mvm_is_scan_fragmented(params->hb_type)) 1692 flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; 1693 1694 if (iwl_mvm_rrm_scan_needed(mvm) && 1695 fw_has_capa(&mvm->fw->ucode_capa, 1696 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 1697 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED; 1698 1699 if (params->pass_all) 1700 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL; 1701 else 1702 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH; 1703 1704 if (!iwl_mvm_is_regular_scan(params)) 1705 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; 1706 1707 if (params->measurement_dwell) 1708 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; 1709 1710 #ifdef CONFIG_IWLWIFI_DEBUGFS 1711 if (mvm->scan_iter_notif_enabled) 1712 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; 1713 #endif 1714 1715 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) 1716 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; 1717 1718 if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE) 1719 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL; 1720 1721 /* 1722 * Extended dwell is relevant only for low band to start with, as it is 1723 * being used for social channles only (1, 6, 11), so we can check 1724 * only scan type on low band also for CDB. 1725 */ 1726 if (iwl_mvm_is_regular_scan(params) && 1727 vif->type != NL80211_IFTYPE_P2P_DEVICE && 1728 !iwl_mvm_is_scan_fragmented(params->type) && 1729 !iwl_mvm_is_adaptive_dwell_supported(mvm) && 1730 !iwl_mvm_is_oce_supported(mvm)) 1731 flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL; 1732 1733 if (iwl_mvm_is_oce_supported(mvm)) { 1734 if ((params->flags & 1735 NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE)) 1736 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE; 1737 /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and 1738 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION shares 1739 * the same bit, we need to make sure that we use this bit here 1740 * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be 1741 * used. */ 1742 if ((params->flags & 1743 NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) && 1744 !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm))) 1745 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP; 1746 if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)) 1747 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME; 1748 } 1749 1750 return flags; 1751 } 1752 1753 static int 1754 iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params, 1755 struct iwl_scan_umac_schedule *schedule, 1756 __le16 *delay) 1757 { 1758 int i; 1759 if (WARN_ON(!params->n_scan_plans || 1760 params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS)) 1761 return -EINVAL; 1762 1763 for (i = 0; i < params->n_scan_plans; i++) { 1764 struct cfg80211_sched_scan_plan *scan_plan = 1765 ¶ms->scan_plans[i]; 1766 1767 schedule[i].iter_count = scan_plan->iterations; 1768 schedule[i].interval = 1769 cpu_to_le16(scan_plan->interval); 1770 } 1771 1772 /* 1773 * If the number of iterations of the last scan plan is set to 1774 * zero, it should run infinitely. However, this is not always the case. 1775 * For example, when regular scan is requested the driver sets one scan 1776 * plan with one iteration. 1777 */ 1778 if (!schedule[params->n_scan_plans - 1].iter_count) 1779 schedule[params->n_scan_plans - 1].iter_count = 0xff; 1780 1781 *delay = cpu_to_le16(params->delay); 1782 1783 return 0; 1784 } 1785 1786 static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1787 struct iwl_mvm_scan_params *params, 1788 int type, int uid) 1789 { 1790 struct iwl_scan_req_umac *cmd = mvm->scan_cmd; 1791 struct iwl_scan_umac_chan_param *chan_param; 1792 void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); 1793 void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * 1794 mvm->fw->ucode_capa.n_scan_channels; 1795 struct iwl_scan_req_umac_tail_v2 *tail_v2 = 1796 (struct iwl_scan_req_umac_tail_v2 *)sec_part; 1797 struct iwl_scan_req_umac_tail_v1 *tail_v1; 1798 struct iwl_ssid_ie *direct_scan; 1799 int ret = 0; 1800 u32 ssid_bitmap = 0; 1801 u8 channel_flags = 0; 1802 u16 gen_flags; 1803 struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); 1804 1805 chan_param = iwl_mvm_get_scan_req_umac_channel(mvm); 1806 1807 iwl_mvm_scan_umac_dwell(mvm, cmd, params); 1808 1809 mvm->scan_uid_status[uid] = type; 1810 1811 cmd->uid = cpu_to_le32(uid); 1812 gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif); 1813 cmd->general_flags = cpu_to_le16(gen_flags); 1814 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) { 1815 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED) 1816 cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] = 1817 IWL_SCAN_NUM_OF_FRAGS; 1818 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED) 1819 cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] = 1820 IWL_SCAN_NUM_OF_FRAGS; 1821 1822 cmd->v8.general_flags2 = 1823 IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER; 1824 } 1825 1826 cmd->scan_start_mac_id = scan_vif->id; 1827 1828 if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) 1829 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); 1830 1831 if (iwl_mvm_scan_use_ebs(mvm, vif)) { 1832 channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1833 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1834 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; 1835 1836 /* set fragmented ebs for fragmented scan on HB channels */ 1837 if (iwl_mvm_is_frag_ebs_supported(mvm)) { 1838 if (gen_flags & 1839 IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED || 1840 (!iwl_mvm_is_cdb_supported(mvm) && 1841 gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)) 1842 channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; 1843 } 1844 } 1845 1846 chan_param->flags = channel_flags; 1847 chan_param->count = params->n_channels; 1848 1849 ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule, 1850 &tail_v2->delay); 1851 if (ret) 1852 return ret; 1853 1854 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) { 1855 tail_v2->preq = params->preq; 1856 direct_scan = tail_v2->direct_scan; 1857 } else { 1858 tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part; 1859 iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq, 1860 ¶ms->preq); 1861 direct_scan = tail_v1->direct_scan; 1862 } 1863 iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap); 1864 iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, 1865 params->n_channels, ssid_bitmap, 1866 cmd_data); 1867 return 0; 1868 } 1869 1870 static void 1871 iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm, 1872 struct iwl_mvm_scan_params *params, 1873 struct ieee80211_vif *vif, 1874 struct iwl_scan_general_params_v10 *gp, 1875 u16 gen_flags) 1876 { 1877 struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); 1878 1879 iwl_mvm_scan_umac_dwell_v10(mvm, gp, params); 1880 1881 gp->flags = cpu_to_le16(gen_flags); 1882 1883 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1) 1884 gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; 1885 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) 1886 gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; 1887 1888 gp->scan_start_mac_id = scan_vif->id; 1889 } 1890 1891 static void 1892 iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params, 1893 struct iwl_scan_probe_params_v3 *pp) 1894 { 1895 pp->preq = params->preq; 1896 pp->ssid_num = params->n_ssids; 1897 iwl_scan_build_ssids(params, pp->direct_scan, NULL); 1898 } 1899 1900 static void 1901 iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params, 1902 struct iwl_scan_probe_params_v4 *pp, 1903 u32 *bitmap_ssid) 1904 { 1905 pp->preq = params->preq; 1906 iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid); 1907 } 1908 1909 static void 1910 iwl_mvm_scan_umac_fill_ch_p_v3(struct iwl_mvm *mvm, 1911 struct iwl_mvm_scan_params *params, 1912 struct ieee80211_vif *vif, 1913 struct iwl_scan_channel_params_v3 *cp) 1914 { 1915 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); 1916 cp->count = params->n_channels; 1917 1918 iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, 1919 params->n_channels, 0, 1920 cp->channel_config); 1921 } 1922 1923 static void 1924 iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm, 1925 struct iwl_mvm_scan_params *params, 1926 struct ieee80211_vif *vif, 1927 struct iwl_scan_channel_params_v4 *cp, 1928 u32 channel_cfg_flags) 1929 { 1930 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); 1931 cp->count = params->n_channels; 1932 cp->num_of_aps_override = IWL_SCAN_ADWELL_DEFAULT_N_APS_OVERRIDE; 1933 1934 iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp, 1935 params->n_channels, 1936 channel_cfg_flags, 1937 vif->type); 1938 } 1939 1940 static int iwl_mvm_scan_umac_v11(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1941 struct iwl_mvm_scan_params *params, int type, 1942 int uid) 1943 { 1944 struct iwl_scan_req_umac_v11 *cmd = mvm->scan_cmd; 1945 struct iwl_scan_req_params_v11 *scan_p = &cmd->scan_params; 1946 int ret; 1947 u16 gen_flags; 1948 1949 mvm->scan_uid_status[uid] = type; 1950 1951 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); 1952 cmd->uid = cpu_to_le32(uid); 1953 1954 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); 1955 iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, 1956 &scan_p->general_params, 1957 gen_flags); 1958 1959 ret = iwl_mvm_fill_scan_sched_params(params, 1960 scan_p->periodic_params.schedule, 1961 &scan_p->periodic_params.delay); 1962 if (ret) 1963 return ret; 1964 1965 iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params); 1966 iwl_mvm_scan_umac_fill_ch_p_v3(mvm, params, vif, 1967 &scan_p->channel_params); 1968 1969 return 0; 1970 } 1971 1972 static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1973 struct iwl_mvm_scan_params *params, int type, 1974 int uid) 1975 { 1976 struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd; 1977 struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params; 1978 int ret; 1979 u16 gen_flags; 1980 1981 mvm->scan_uid_status[uid] = type; 1982 1983 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); 1984 cmd->uid = cpu_to_le32(uid); 1985 1986 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); 1987 iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, 1988 &scan_p->general_params, 1989 gen_flags); 1990 1991 ret = iwl_mvm_fill_scan_sched_params(params, 1992 scan_p->periodic_params.schedule, 1993 &scan_p->periodic_params.delay); 1994 if (ret) 1995 return ret; 1996 1997 iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params); 1998 iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif, 1999 &scan_p->channel_params, 0); 2000 2001 return 0; 2002 } 2003 2004 static int iwl_mvm_scan_umac_v13(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2005 struct iwl_mvm_scan_params *params, int type, 2006 int uid) 2007 { 2008 struct iwl_scan_req_umac_v13 *cmd = mvm->scan_cmd; 2009 struct iwl_scan_req_params_v13 *scan_p = &cmd->scan_params; 2010 int ret; 2011 u16 gen_flags; 2012 u32 bitmap_ssid = 0; 2013 2014 mvm->scan_uid_status[uid] = type; 2015 2016 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params)); 2017 cmd->uid = cpu_to_le32(uid); 2018 2019 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); 2020 iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, 2021 &scan_p->general_params, 2022 gen_flags); 2023 2024 ret = iwl_mvm_fill_scan_sched_params(params, 2025 scan_p->periodic_params.schedule, 2026 &scan_p->periodic_params.delay); 2027 if (ret) 2028 return ret; 2029 2030 iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, 2031 &bitmap_ssid); 2032 iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif, 2033 &scan_p->channel_params, bitmap_ssid); 2034 2035 return 0; 2036 } 2037 2038 static int iwl_mvm_num_scans(struct iwl_mvm *mvm) 2039 { 2040 return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); 2041 } 2042 2043 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) 2044 { 2045 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 2046 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 2047 2048 /* This looks a bit arbitrary, but the idea is that if we run 2049 * out of possible simultaneous scans and the userspace is 2050 * trying to run a scan type that is already running, we 2051 * return -EBUSY. But if the userspace wants to start a 2052 * different type of scan, we stop the opposite type to make 2053 * space for the new request. The reason is backwards 2054 * compatibility with old wpa_supplicant that wouldn't stop a 2055 * scheduled scan before starting a normal scan. 2056 */ 2057 2058 /* FW supports only a single periodic scan */ 2059 if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) && 2060 mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT)) 2061 return -EBUSY; 2062 2063 if (iwl_mvm_num_scans(mvm) < mvm->max_scans) 2064 return 0; 2065 2066 /* Use a switch, even though this is a bitmask, so that more 2067 * than one bits set will fall in default and we will warn. 2068 */ 2069 switch (type) { 2070 case IWL_MVM_SCAN_REGULAR: 2071 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) 2072 return -EBUSY; 2073 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2074 case IWL_MVM_SCAN_SCHED: 2075 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) 2076 return -EBUSY; 2077 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2078 case IWL_MVM_SCAN_NETDETECT: 2079 /* For non-unified images, there's no need to stop 2080 * anything for net-detect since the firmware is 2081 * restarted anyway. This way, any sched scans that 2082 * were running will be restarted when we resume. 2083 */ 2084 if (!unified_image) 2085 return 0; 2086 2087 /* If this is a unified image and we ran out of scans, 2088 * we need to stop something. Prefer stopping regular 2089 * scans, because the results are useless at this 2090 * point, and we should be able to keep running 2091 * another scheduled scan while suspended. 2092 */ 2093 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) 2094 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, 2095 true); 2096 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) 2097 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, 2098 true); 2099 /* Something is wrong if no scan was running but we 2100 * ran out of scans. 2101 */ 2102 /* fall through */ 2103 default: 2104 WARN_ON(1); 2105 break; 2106 } 2107 2108 return -EIO; 2109 } 2110 2111 #define SCAN_TIMEOUT 20000 2112 2113 void iwl_mvm_scan_timeout_wk(struct work_struct *work) 2114 { 2115 struct delayed_work *delayed_work = to_delayed_work(work); 2116 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, 2117 scan_timeout_dwork); 2118 2119 IWL_ERR(mvm, "regular scan timed out\n"); 2120 2121 iwl_force_nmi(mvm->trans); 2122 } 2123 2124 static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, 2125 struct iwl_mvm_scan_params *params, 2126 struct ieee80211_vif *vif) 2127 { 2128 if (iwl_mvm_is_cdb_supported(mvm)) { 2129 params->type = 2130 iwl_mvm_get_scan_type_band(mvm, vif, 2131 NL80211_BAND_2GHZ); 2132 params->hb_type = 2133 iwl_mvm_get_scan_type_band(mvm, vif, 2134 NL80211_BAND_5GHZ); 2135 } else { 2136 params->type = iwl_mvm_get_scan_type(mvm, vif); 2137 } 2138 } 2139 2140 struct iwl_scan_umac_handler { 2141 u8 version; 2142 int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2143 struct iwl_mvm_scan_params *params, int type, int uid); 2144 }; 2145 2146 #define IWL_SCAN_UMAC_HANDLER(_ver) { \ 2147 .version = _ver, \ 2148 .handler = iwl_mvm_scan_umac_v##_ver, \ 2149 } 2150 2151 static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { 2152 /* set the newest version first to shorten the list traverse time */ 2153 IWL_SCAN_UMAC_HANDLER(13), 2154 IWL_SCAN_UMAC_HANDLER(12), 2155 IWL_SCAN_UMAC_HANDLER(11), 2156 }; 2157 2158 static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, 2159 struct ieee80211_vif *vif, 2160 struct iwl_host_cmd *hcmd, 2161 struct iwl_mvm_scan_params *params, 2162 int type) 2163 { 2164 int uid, i; 2165 u8 scan_ver; 2166 2167 lockdep_assert_held(&mvm->mutex); 2168 memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd)); 2169 2170 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 2171 hcmd->id = SCAN_OFFLOAD_REQUEST_CMD; 2172 2173 return iwl_mvm_scan_lmac(mvm, vif, params); 2174 } 2175 2176 uid = iwl_mvm_scan_uid_by_status(mvm, 0); 2177 if (uid < 0) 2178 return uid; 2179 2180 hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); 2181 2182 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, 2183 SCAN_REQ_UMAC); 2184 2185 for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) { 2186 const struct iwl_scan_umac_handler *ver_handler = 2187 &iwl_scan_umac_handlers[i]; 2188 2189 if (ver_handler->version != scan_ver) 2190 continue; 2191 2192 return ver_handler->handler(mvm, vif, params, type, uid); 2193 } 2194 2195 return iwl_mvm_scan_umac(mvm, vif, params, type, uid); 2196 } 2197 2198 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2199 struct cfg80211_scan_request *req, 2200 struct ieee80211_scan_ies *ies) 2201 { 2202 struct iwl_host_cmd hcmd = { 2203 .len = { iwl_mvm_scan_size(mvm), }, 2204 .data = { mvm->scan_cmd, }, 2205 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 2206 }; 2207 struct iwl_mvm_scan_params params = {}; 2208 int ret; 2209 struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 }; 2210 2211 lockdep_assert_held(&mvm->mutex); 2212 2213 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { 2214 IWL_ERR(mvm, "scan while LAR regdomain is not set\n"); 2215 return -EBUSY; 2216 } 2217 2218 ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR); 2219 if (ret) 2220 return ret; 2221 2222 /* we should have failed registration if scan_cmd was NULL */ 2223 if (WARN_ON(!mvm->scan_cmd)) 2224 return -ENOMEM; 2225 2226 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) 2227 return -ENOBUFS; 2228 2229 params.n_ssids = req->n_ssids; 2230 params.flags = req->flags; 2231 params.n_channels = req->n_channels; 2232 params.delay = 0; 2233 params.ssids = req->ssids; 2234 params.channels = req->channels; 2235 params.mac_addr = req->mac_addr; 2236 params.mac_addr_mask = req->mac_addr_mask; 2237 params.no_cck = req->no_cck; 2238 params.pass_all = true; 2239 params.n_match_sets = 0; 2240 params.match_sets = NULL; 2241 2242 params.scan_plans = &scan_plan; 2243 params.n_scan_plans = 1; 2244 2245 iwl_mvm_fill_scan_type(mvm, ¶ms, vif); 2246 2247 ret = iwl_mvm_get_measurement_dwell(mvm, req, ¶ms); 2248 if (ret < 0) 2249 return ret; 2250 2251 params.measurement_dwell = ret; 2252 2253 iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); 2254 2255 ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, 2256 IWL_MVM_SCAN_REGULAR); 2257 2258 if (ret) 2259 return ret; 2260 2261 iwl_mvm_pause_tcm(mvm, false); 2262 2263 ret = iwl_mvm_send_cmd(mvm, &hcmd); 2264 if (ret) { 2265 /* If the scan failed, it usually means that the FW was unable 2266 * to allocate the time events. Warn on it, but maybe we 2267 * should try to send the command again with different params. 2268 */ 2269 IWL_ERR(mvm, "Scan failed! ret %d\n", ret); 2270 iwl_mvm_resume_tcm(mvm); 2271 return ret; 2272 } 2273 2274 IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n"); 2275 mvm->scan_status |= IWL_MVM_SCAN_REGULAR; 2276 mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif); 2277 2278 schedule_delayed_work(&mvm->scan_timeout_dwork, 2279 msecs_to_jiffies(SCAN_TIMEOUT)); 2280 2281 return 0; 2282 } 2283 2284 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, 2285 struct ieee80211_vif *vif, 2286 struct cfg80211_sched_scan_request *req, 2287 struct ieee80211_scan_ies *ies, 2288 int type) 2289 { 2290 struct iwl_host_cmd hcmd = { 2291 .len = { iwl_mvm_scan_size(mvm), }, 2292 .data = { mvm->scan_cmd, }, 2293 .dataflags = { IWL_HCMD_DFL_NOCOPY, }, 2294 }; 2295 struct iwl_mvm_scan_params params = {}; 2296 int ret; 2297 2298 lockdep_assert_held(&mvm->mutex); 2299 2300 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { 2301 IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n"); 2302 return -EBUSY; 2303 } 2304 2305 ret = iwl_mvm_check_running_scans(mvm, type); 2306 if (ret) 2307 return ret; 2308 2309 /* we should have failed registration if scan_cmd was NULL */ 2310 if (WARN_ON(!mvm->scan_cmd)) 2311 return -ENOMEM; 2312 2313 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) 2314 return -ENOBUFS; 2315 2316 params.n_ssids = req->n_ssids; 2317 params.flags = req->flags; 2318 params.n_channels = req->n_channels; 2319 params.ssids = req->ssids; 2320 params.channels = req->channels; 2321 params.mac_addr = req->mac_addr; 2322 params.mac_addr_mask = req->mac_addr_mask; 2323 params.no_cck = false; 2324 params.pass_all = iwl_mvm_scan_pass_all(mvm, req); 2325 params.n_match_sets = req->n_match_sets; 2326 params.match_sets = req->match_sets; 2327 if (!req->n_scan_plans) 2328 return -EINVAL; 2329 2330 params.n_scan_plans = req->n_scan_plans; 2331 params.scan_plans = req->scan_plans; 2332 2333 iwl_mvm_fill_scan_type(mvm, ¶ms, vif); 2334 2335 /* In theory, LMAC scans can handle a 32-bit delay, but since 2336 * waiting for over 18 hours to start the scan is a bit silly 2337 * and to keep it aligned with UMAC scans (which only support 2338 * 16-bit delays), trim it down to 16-bits. 2339 */ 2340 if (req->delay > U16_MAX) { 2341 IWL_DEBUG_SCAN(mvm, 2342 "delay value is > 16-bits, set to max possible\n"); 2343 params.delay = U16_MAX; 2344 } else { 2345 params.delay = req->delay; 2346 } 2347 2348 ret = iwl_mvm_config_sched_scan_profiles(mvm, req); 2349 if (ret) 2350 return ret; 2351 2352 iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); 2353 2354 ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); 2355 2356 if (ret) 2357 return ret; 2358 2359 ret = iwl_mvm_send_cmd(mvm, &hcmd); 2360 if (!ret) { 2361 IWL_DEBUG_SCAN(mvm, 2362 "Sched scan request was sent successfully\n"); 2363 mvm->scan_status |= type; 2364 } else { 2365 /* If the scan failed, it usually means that the FW was unable 2366 * to allocate the time events. Warn on it, but maybe we 2367 * should try to send the command again with different params. 2368 */ 2369 IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret); 2370 } 2371 2372 return ret; 2373 } 2374 2375 void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, 2376 struct iwl_rx_cmd_buffer *rxb) 2377 { 2378 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2379 struct iwl_umac_scan_complete *notif = (void *)pkt->data; 2380 u32 uid = __le32_to_cpu(notif->uid); 2381 bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); 2382 2383 if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) 2384 return; 2385 2386 /* if the scan is already stopping, we don't need to notify mac80211 */ 2387 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { 2388 struct cfg80211_scan_info info = { 2389 .aborted = aborted, 2390 .scan_start_tsf = mvm->scan_start, 2391 }; 2392 2393 memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN); 2394 ieee80211_scan_completed(mvm->hw, &info); 2395 mvm->scan_vif = NULL; 2396 cancel_delayed_work(&mvm->scan_timeout_dwork); 2397 iwl_mvm_resume_tcm(mvm); 2398 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { 2399 ieee80211_sched_scan_stopped(mvm->hw); 2400 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 2401 } 2402 2403 mvm->scan_status &= ~mvm->scan_uid_status[uid]; 2404 IWL_DEBUG_SCAN(mvm, 2405 "Scan completed, uid %u type %u, status %s, EBS status %s\n", 2406 uid, mvm->scan_uid_status[uid], 2407 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? 2408 "completed" : "aborted", 2409 iwl_mvm_ebs_status_str(notif->ebs_status)); 2410 IWL_DEBUG_SCAN(mvm, 2411 "Last line %d, Last iteration %d, Time from last iteration %d\n", 2412 notif->last_schedule, notif->last_iter, 2413 __le32_to_cpu(notif->time_from_last_iter)); 2414 2415 if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && 2416 notif->ebs_status != IWL_SCAN_EBS_INACTIVE) 2417 mvm->last_ebs_successful = false; 2418 2419 mvm->scan_uid_status[uid] = 0; 2420 } 2421 2422 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, 2423 struct iwl_rx_cmd_buffer *rxb) 2424 { 2425 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2426 struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; 2427 2428 mvm->scan_start = le64_to_cpu(notif->start_tsf); 2429 2430 IWL_DEBUG_SCAN(mvm, 2431 "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n", 2432 notif->status, notif->scanned_channels); 2433 2434 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { 2435 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); 2436 ieee80211_sched_scan_results(mvm->hw); 2437 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; 2438 } 2439 2440 IWL_DEBUG_SCAN(mvm, 2441 "UMAC Scan iteration complete: scan started at %llu (TSF)\n", 2442 mvm->scan_start); 2443 } 2444 2445 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) 2446 { 2447 struct iwl_umac_scan_abort cmd = {}; 2448 int uid, ret; 2449 2450 lockdep_assert_held(&mvm->mutex); 2451 2452 /* We should always get a valid index here, because we already 2453 * checked that this type of scan was running in the generic 2454 * code. 2455 */ 2456 uid = iwl_mvm_scan_uid_by_status(mvm, type); 2457 if (WARN_ON_ONCE(uid < 0)) 2458 return uid; 2459 2460 cmd.uid = cpu_to_le32(uid); 2461 2462 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); 2463 2464 ret = iwl_mvm_send_cmd_pdu(mvm, 2465 iwl_cmd_id(SCAN_ABORT_UMAC, 2466 IWL_ALWAYS_LONG_GROUP, 0), 2467 0, sizeof(cmd), &cmd); 2468 if (!ret) 2469 mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; 2470 2471 return ret; 2472 } 2473 2474 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) 2475 { 2476 struct iwl_notification_wait wait_scan_done; 2477 static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, 2478 SCAN_OFFLOAD_COMPLETE, }; 2479 int ret; 2480 2481 lockdep_assert_held(&mvm->mutex); 2482 2483 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, 2484 scan_done_notif, 2485 ARRAY_SIZE(scan_done_notif), 2486 NULL, NULL); 2487 2488 IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); 2489 2490 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2491 ret = iwl_mvm_umac_scan_abort(mvm, type); 2492 else 2493 ret = iwl_mvm_lmac_scan_abort(mvm); 2494 2495 if (ret) { 2496 IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); 2497 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); 2498 return ret; 2499 } 2500 2501 return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 2502 1 * HZ); 2503 } 2504 2505 #define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \ 2506 case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \ 2507 } 2508 2509 static int iwl_scan_req_umac_get_size(u8 scan_ver) 2510 { 2511 switch (scan_ver) { 2512 IWL_SCAN_REQ_UMAC_HANDLE_SIZE(13); 2513 IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12); 2514 IWL_SCAN_REQ_UMAC_HANDLE_SIZE(11); 2515 } 2516 2517 return 0; 2518 } 2519 2520 int iwl_mvm_scan_size(struct iwl_mvm *mvm) 2521 { 2522 int base_size, tail_size; 2523 u8 scan_ver = iwl_mvm_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, 2524 SCAN_REQ_UMAC); 2525 2526 base_size = iwl_scan_req_umac_get_size(scan_ver); 2527 if (base_size) 2528 return base_size; 2529 2530 2531 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) 2532 base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; 2533 else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) 2534 base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; 2535 else if (iwl_mvm_cdb_scan_api(mvm)) 2536 base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; 2537 else 2538 base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; 2539 2540 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 2541 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) 2542 tail_size = sizeof(struct iwl_scan_req_umac_tail_v2); 2543 else 2544 tail_size = sizeof(struct iwl_scan_req_umac_tail_v1); 2545 2546 return base_size + 2547 sizeof(struct iwl_scan_channel_cfg_umac) * 2548 mvm->fw->ucode_capa.n_scan_channels + 2549 tail_size; 2550 } 2551 return sizeof(struct iwl_scan_req_lmac) + 2552 sizeof(struct iwl_scan_channel_cfg_lmac) * 2553 mvm->fw->ucode_capa.n_scan_channels + 2554 sizeof(struct iwl_scan_probe_req_v1); 2555 } 2556 2557 /* 2558 * This function is used in nic restart flow, to inform mac80211 about scans 2559 * that was aborted by restart flow or by an assert. 2560 */ 2561 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) 2562 { 2563 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 2564 int uid, i; 2565 2566 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); 2567 if (uid >= 0) { 2568 struct cfg80211_scan_info info = { 2569 .aborted = true, 2570 }; 2571 2572 ieee80211_scan_completed(mvm->hw, &info); 2573 mvm->scan_uid_status[uid] = 0; 2574 } 2575 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); 2576 if (uid >= 0 && !mvm->fw_restart) { 2577 ieee80211_sched_scan_stopped(mvm->hw); 2578 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 2579 mvm->scan_uid_status[uid] = 0; 2580 } 2581 2582 /* We shouldn't have any UIDs still set. Loop over all the 2583 * UIDs to make sure there's nothing left there and warn if 2584 * any is found. 2585 */ 2586 for (i = 0; i < mvm->max_scans; i++) { 2587 if (WARN_ONCE(mvm->scan_uid_status[i], 2588 "UMAC scan UID %d status was not cleaned\n", 2589 i)) 2590 mvm->scan_uid_status[i] = 0; 2591 } 2592 } else { 2593 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { 2594 struct cfg80211_scan_info info = { 2595 .aborted = true, 2596 }; 2597 2598 ieee80211_scan_completed(mvm->hw, &info); 2599 } 2600 2601 /* Sched scan will be restarted by mac80211 in 2602 * restart_hw, so do not report if FW is about to be 2603 * restarted. 2604 */ 2605 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && 2606 !mvm->fw_restart) { 2607 ieee80211_sched_scan_stopped(mvm->hw); 2608 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 2609 } 2610 } 2611 } 2612 2613 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) 2614 { 2615 int ret; 2616 2617 if (!(mvm->scan_status & type)) 2618 return 0; 2619 2620 if (iwl_mvm_is_radio_killed(mvm)) { 2621 ret = 0; 2622 goto out; 2623 } 2624 2625 ret = iwl_mvm_scan_stop_wait(mvm, type); 2626 if (!ret) 2627 mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; 2628 out: 2629 /* Clear the scan status so the next scan requests will 2630 * succeed and mark the scan as stopping, so that the Rx 2631 * handler doesn't do anything, as the scan was stopped from 2632 * above. 2633 */ 2634 mvm->scan_status &= ~type; 2635 2636 if (type == IWL_MVM_SCAN_REGULAR) { 2637 cancel_delayed_work(&mvm->scan_timeout_dwork); 2638 if (notify) { 2639 struct cfg80211_scan_info info = { 2640 .aborted = true, 2641 }; 2642 2643 ieee80211_scan_completed(mvm->hw, &info); 2644 } 2645 } else if (notify) { 2646 ieee80211_sched_scan_stopped(mvm->hw); 2647 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; 2648 } 2649 2650 return ret; 2651 } 2652