1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <net/mac80211.h>
9 #include <linux/crc32.h>
10
11 #include "mvm.h"
12 #include "fw/api/scan.h"
13 #include "iwl-io.h"
14
15 #define IWL_DENSE_EBS_SCAN_RATIO 5
16 #define IWL_SPARSE_EBS_SCAN_RATIO 1
17
18 #define IWL_SCAN_DWELL_ACTIVE 10
19 #define IWL_SCAN_DWELL_PASSIVE 110
20 #define IWL_SCAN_DWELL_FRAGMENTED 44
21 #define IWL_SCAN_DWELL_EXTENDED 90
22 #define IWL_SCAN_NUM_OF_FRAGS 3
23
24 /* adaptive dwell max budget time [TU] for full scan */
25 #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
26 /* adaptive dwell max budget time [TU] for directed scan */
27 #define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
28 /* adaptive dwell default high band APs number */
29 #define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
30 /* adaptive dwell default low band APs number */
31 #define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
32 /* adaptive dwell default APs number in social channels (1, 6, 11) */
33 #define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
34 /* number of scan channels */
35 #define IWL_SCAN_NUM_CHANNELS 112
36 /* adaptive dwell number of APs override mask for p2p friendly GO */
37 #define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
38 /* adaptive dwell number of APs override mask for social channels */
39 #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
40 /* adaptive dwell number of APs override for p2p friendly GO channels */
41 #define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
42 /* adaptive dwell number of APs override for social channels */
43 #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
44
45 /* minimal number of 2GHz and 5GHz channels in the regular scan request */
46 #define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
47
48 /* Number of iterations on the channel for mei filtered scan */
49 #define IWL_MEI_SCAN_NUM_ITER 5U
50
51 #define WFA_TPC_IE_LEN 9
52
53 struct iwl_mvm_scan_timing_params {
54 u32 suspend_time;
55 u32 max_out_time;
56 };
57
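/*
 * Per scan type timing: max_out_time limits how long we stay away from
 * the operating channel and suspend_time is the pause spent back on it
 * between scan fragments/iterations (both presumably in TU). The
 * unassociated case uses 0 since there is no operating channel to
 * return to.
 */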
58 static struct iwl_mvm_scan_timing_params scan_timing[] = {
59 [IWL_SCAN_TYPE_UNASSOC] = {
60 .suspend_time = 0,
61 .max_out_time = 0,
62 },
63 [IWL_SCAN_TYPE_WILD] = {
64 .suspend_time = 30,
65 .max_out_time = 120,
66 },
67 [IWL_SCAN_TYPE_MILD] = {
68 .suspend_time = 120,
69 .max_out_time = 120,
70 },
71 [IWL_SCAN_TYPE_FRAGMENTED] = {
72 .suspend_time = 95,
73 .max_out_time = 44,
74 },
75 [IWL_SCAN_TYPE_FAST_BALANCE] = {
76 .suspend_time = 30,
77 .max_out_time = 37,
78 },
79 };
80
81 struct iwl_mvm_scan_params {
82 /* For CDB this is low band scan type, for non-CDB - type. */
83 enum iwl_mvm_scan_type type;
84 enum iwl_mvm_scan_type hb_type;
85 u32 n_channels;
86 u16 delay;
87 int n_ssids;
88 struct cfg80211_ssid *ssids;
89 struct ieee80211_channel **channels;
90 u32 flags;
91 u8 *mac_addr;
92 u8 *mac_addr_mask;
93 bool no_cck;
94 bool pass_all;
95 int n_match_sets;
96 struct iwl_scan_probe_req preq;
97 struct cfg80211_match_set *match_sets;
98 int n_scan_plans;
99 struct cfg80211_sched_scan_plan *scan_plans;
100 bool iter_notif;
101 struct cfg80211_scan_6ghz_params *scan_6ghz_params;
102 u32 n_6ghz_params;
103 bool scan_6ghz;
104 bool enable_6ghz_passive;
105 bool respect_p2p_go, respect_p2p_go_hb;
106 u8 bssid[ETH_ALEN] __aligned(2);
107 };
108
109 static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
110 {
111 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
112
113 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
114 return (void *)&cmd->v8.data;
115
116 if (iwl_mvm_is_adaptive_dwell_supported(mvm))
117 return (void *)&cmd->v7.data;
118
119 if (iwl_mvm_cdb_scan_api(mvm))
120 return (void *)&cmd->v6.data;
121
122 return (void *)&cmd->v1.data;
123 }
124
125 static inline struct iwl_scan_umac_chan_param *
126 iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
127 {
128 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
129
130 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
131 return &cmd->v8.channel;
132
133 if (iwl_mvm_is_adaptive_dwell_supported(mvm))
134 return &cmd->v7.channel;
135
136 if (iwl_mvm_cdb_scan_api(mvm))
137 return &cmd->v6.channel;
138
139 return &cmd->v1.channel;
140 }
141
142 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
143 {
144 if (mvm->scan_rx_ant != ANT_NONE)
145 return mvm->scan_rx_ant;
146 return iwl_mvm_get_valid_rx_ant(mvm);
147 }
148
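/*
 * Build the RX chain selection word for scanning: force-select the
 * valid RX antennas (also for MIMO) and set the "driver force" bit so
 * the firmware uses exactly this configuration.
 */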
149 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
150 {
151 u16 rx_chain;
152 u8 rx_ant;
153
154 rx_ant = iwl_mvm_scan_rx_ant(mvm);
155 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
156 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
157 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
158 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
159 return cpu_to_le16(rx_chain);
160 }
161
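/*
 * Pick the TX antenna (toggled between scans to balance antenna use)
 * and the lowest mandatory rate: 1 Mbps CCK on 2.4 GHz unless CCK is
 * disallowed, 6 Mbps OFDM otherwise.
 */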
162 static inline __le32
163 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
164 bool no_cck)
165 {
166 u32 tx_ant;
167
168 iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
169 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
170
171 if (band == NL80211_BAND_2GHZ && !no_cck)
172 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 |
173 tx_ant);
174 else
175 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
176 }
177
178 static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
179 {
180 return mvm->tcm.result.global_load;
181 }
182
183 static enum iwl_mvm_traffic_load
184 iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
185 {
186 return mvm->tcm.result.band_load[band];
187 }
188
189 struct iwl_mvm_scan_iter_data {
190 u32 global_cnt;
191 struct ieee80211_vif *current_vif;
192 bool is_dcm_with_p2p_go;
193 };
194
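/*
 * Interface iterator: count interfaces that are bound to a PHY context
 * (i.e. effectively active) and note whether a P2P GO runs on a
 * different PHY context than the scanning interface (DCM).
 */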
195 static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
196 struct ieee80211_vif *vif)
197 {
198 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
199 struct iwl_mvm_scan_iter_data *data = _data;
200 struct iwl_mvm_vif *curr_mvmvif;
201
202 if (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
203 mvmvif->deflink.phy_ctxt &&
204 mvmvif->deflink.phy_ctxt->id < NUM_PHY_CTX)
205 data->global_cnt += 1;
206
207 if (!data->current_vif || vif == data->current_vif)
208 return;
209
210 curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
211
212 if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
213 mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
214 mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
215 data->is_dcm_with_p2p_go = true;
216 }
217
218 static enum
219 iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
220 struct ieee80211_vif *vif,
221 enum iwl_mvm_traffic_load load,
222 bool low_latency)
223 {
224 struct iwl_mvm_scan_iter_data data = {
225 .current_vif = vif,
226 .is_dcm_with_p2p_go = false,
227 .global_cnt = 0,
228 };
229
230 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
231 IEEE80211_IFACE_ITER_NORMAL,
232 iwl_mvm_scan_iterator,
233 &data);
234
235 if (!data.global_cnt)
236 return IWL_SCAN_TYPE_UNASSOC;
237
238 if (fw_has_api(&mvm->fw->ucode_capa,
239 IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
240 if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
241 (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
242 return IWL_SCAN_TYPE_FRAGMENTED;
243
244 /*
245 * in case of DCM with GO where BSS DTIM interval < 220msec
246 * set all scan requests as fast-balance scan
247 */
248 if (vif && vif->type == NL80211_IFTYPE_STATION &&
249 data.is_dcm_with_p2p_go &&
250 ((vif->bss_conf.beacon_int *
251 vif->bss_conf.dtim_period) < 220))
252 return IWL_SCAN_TYPE_FAST_BALANCE;
253 }
254
255 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
256 return IWL_SCAN_TYPE_MILD;
257
258 return IWL_SCAN_TYPE_WILD;
259 }
260
261 static enum
262 iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
263 struct ieee80211_vif *vif)
264 {
265 enum iwl_mvm_traffic_load load;
266 bool low_latency;
267
268 load = iwl_mvm_get_traffic_load(mvm);
269 low_latency = iwl_mvm_low_latency(mvm);
270
271 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
272 }
273
274 static enum
275 iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
276 struct ieee80211_vif *vif,
277 enum nl80211_band band)
278 {
279 enum iwl_mvm_traffic_load load;
280 bool low_latency;
281
282 load = iwl_mvm_get_traffic_load_band(mvm, band);
283 low_latency = iwl_mvm_low_latency_band(mvm, band);
284
285 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
286 }
287
288 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
289 {
290 /* require rrm scan whenever the fw supports it */
291 return fw_has_capa(&mvm->fw->ucode_capa,
292 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
293 }
294
295 static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
296 {
297 int max_probe_len;
298
299 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
300
301 /* we create the 802.11 header SSID element and WFA TPC element */
302 max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
303
304 /* DS parameter set element is added on 2.4GHZ band if required */
305 if (iwl_mvm_rrm_scan_needed(mvm))
306 max_probe_len -= 3;
307
308 return max_probe_len;
309 }
310
311 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
312 {
313 int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
314
315 /* TODO: [BUG] This function should return the maximum allowed size of
316 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
317 * in the same command. So the correct implementation of this function
318 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
319 * command has only 512 bytes and it would leave us with about 240
320 * bytes for scan IEs, which is clearly not enough. So meanwhile
321 * we will report an incorrect value. This may result in a failure to
322 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
323 * functions with -ENOBUFS, if a large enough probe will be provided.
324 */
325 return max_ie_len;
326 }
327
328 void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
329 struct iwl_rx_cmd_buffer *rxb)
330 {
331 struct iwl_rx_packet *pkt = rxb_addr(rxb);
332 struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
333
334 IWL_DEBUG_SCAN(mvm,
335 "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
336 notif->status, notif->scanned_channels);
337
338 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
339 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
340 ieee80211_sched_scan_results(mvm->hw);
341 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
342 }
343 }
344
345 void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
346 struct iwl_rx_cmd_buffer *rxb)
347 {
348 IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
349 ieee80211_sched_scan_results(mvm->hw);
350 }
351
352 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
353 {
354 switch (status) {
355 case IWL_SCAN_EBS_SUCCESS:
356 return "successful";
357 case IWL_SCAN_EBS_INACTIVE:
358 return "inactive";
359 case IWL_SCAN_EBS_FAILED:
360 case IWL_SCAN_EBS_CHAN_NOT_FOUND:
361 default:
362 return "failed";
363 }
364 }
365
366 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
367 struct iwl_rx_cmd_buffer *rxb)
368 {
369 struct iwl_rx_packet *pkt = rxb_addr(rxb);
370 struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
371 bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
372
373 /* If this happens, the firmware has mistakenly sent an LMAC
374 * notification during UMAC scans -- warn and ignore it.
375 */
376 if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
377 IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
378 return;
379
380 /* scan status must be locked for proper checking */
381 lockdep_assert_held(&mvm->mutex);
382
383 /* We first check if we were stopping a scan, in which case we
384 * just clear the stopping flag. Then we check if it was a
385 * firmware initiated stop, in which case we need to inform
386 * mac80211.
387 * Note that we can have a stopping and a running scan
388 * simultaneously, but we can't have two different types of
389 * scans stopping or running at the same time (since LMAC
390 * doesn't support it).
391 */
392
393 if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
394 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
395
396 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
397 aborted ? "aborted" : "completed",
398 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
399 IWL_DEBUG_SCAN(mvm,
400 "Last line %d, Last iteration %d, Time after last iteration %d\n",
401 scan_notif->last_schedule_line,
402 scan_notif->last_schedule_iteration,
403 __le32_to_cpu(scan_notif->time_after_last_iter));
404
405 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
406 } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
407 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
408 aborted ? "aborted" : "completed",
409 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
410
411 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
412 } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
413 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
414
415 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
416 aborted ? "aborted" : "completed",
417 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
418 IWL_DEBUG_SCAN(mvm,
419 "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
420 scan_notif->last_schedule_line,
421 scan_notif->last_schedule_iteration,
422 __le32_to_cpu(scan_notif->time_after_last_iter));
423
424 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
425 ieee80211_sched_scan_stopped(mvm->hw);
426 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
427 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
428 struct cfg80211_scan_info info = {
429 .aborted = aborted,
430 };
431
432 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
433 aborted ? "aborted" : "completed",
434 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
435
436 mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
437 ieee80211_scan_completed(mvm->hw, &info);
438 cancel_delayed_work(&mvm->scan_timeout_dwork);
439 iwl_mvm_resume_tcm(mvm);
440 } else {
441 IWL_ERR(mvm,
442 "got scan complete notification but no scan is running\n");
443 }
444
445 mvm->last_ebs_successful =
446 scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
447 scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
448 }
449
450 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
451 {
452 int i;
453
454 for (i = 0; i < PROBE_OPTION_MAX; i++) {
455 if (!ssid_list[i].len)
456 break;
457 if (ssid_list[i].len == ssid_len &&
458 !memcmp(ssid_list[i].ssid, ssid, ssid_len))
459 return i;
460 }
461 return -1;
462 }
463
464 /* We insert the SSIDs in an inverted order, because the FW will
465 * invert it back.
466 */
467 static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
468 struct iwl_ssid_ie *ssids,
469 u32 *ssid_bitmap)
470 {
471 int i, j;
472 int index;
473 u32 tmp_bitmap = 0;
474
475 /*
476 * copy SSIDs from match list.
477 * iwl_config_sched_scan_profiles() uses the order of these ssids to
478 * config match list.
479 */
480 for (i = 0, j = params->n_match_sets - 1;
481 j >= 0 && i < PROBE_OPTION_MAX;
482 i++, j--) {
483 /* skip empty SSID matchsets */
484 if (!params->match_sets[j].ssid.ssid_len)
485 continue;
486 ssids[i].id = WLAN_EID_SSID;
487 ssids[i].len = params->match_sets[j].ssid.ssid_len;
488 memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
489 ssids[i].len);
490 }
491
492 /* add SSIDs from scan SSID list */
493 for (j = params->n_ssids - 1;
494 j >= 0 && i < PROBE_OPTION_MAX;
495 i++, j--) {
496 index = iwl_ssid_exist(params->ssids[j].ssid,
497 params->ssids[j].ssid_len,
498 ssids);
499 if (index < 0) {
500 ssids[i].id = WLAN_EID_SSID;
501 ssids[i].len = params->ssids[j].ssid_len;
502 memcpy(ssids[i].ssid, params->ssids[j].ssid,
503 ssids[i].len);
504 tmp_bitmap |= BIT(i);
505 } else {
506 tmp_bitmap |= BIT(index);
507 }
508 }
509 if (ssid_bitmap)
510 *ssid_bitmap = tmp_bitmap;
511 }
512
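/*
 * Send the scheduled scan match-set profiles together with an (empty)
 * blocklist. The v1 and v2 command layouts differ only in the maximum
 * number of profiles, so the shared data header is reached through
 * either struct depending on what the firmware advertises.
 */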
513 static int
514 iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
515 struct cfg80211_sched_scan_request *req)
516 {
517 struct iwl_scan_offload_profile *profile;
518 struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
519 struct iwl_scan_offload_blocklist *blocklist;
520 struct iwl_scan_offload_profile_cfg_data *data;
521 int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
522 int profile_cfg_size = sizeof(*data) +
523 sizeof(*profile) * max_profiles;
524 struct iwl_host_cmd cmd = {
525 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
526 .len[1] = profile_cfg_size,
527 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
528 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
529 };
530 int blocklist_len;
531 int i;
532 int ret;
533
534 if (WARN_ON(req->n_match_sets > max_profiles))
535 return -EIO;
536
537 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
538 blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
539 else
540 blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
541
542 blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL);
543 if (!blocklist)
544 return -ENOMEM;
545
546 profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
547 if (!profile_cfg_v1) {
548 ret = -ENOMEM;
549 goto free_blocklist;
550 }
551
552 cmd.data[0] = blocklist;
553 cmd.len[0] = sizeof(*blocklist) * blocklist_len;
554 cmd.data[1] = profile_cfg_v1;
555
556 /* if max_profile is MAX_PROFILES_V2, we have the new API */
557 if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
558 struct iwl_scan_offload_profile_cfg *profile_cfg =
559 (struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;
560
561 data = &profile_cfg->data;
562 } else {
563 data = &profile_cfg_v1->data;
564 }
565
566 /* No blocklist configuration */
567 data->num_profiles = req->n_match_sets;
568 data->active_clients = SCAN_CLIENT_SCHED_SCAN;
569 data->pass_match = SCAN_CLIENT_SCHED_SCAN;
570 data->match_notify = SCAN_CLIENT_SCHED_SCAN;
571
572 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
573 data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
574
575 for (i = 0; i < req->n_match_sets; i++) {
576 profile = &profile_cfg_v1->profiles[i];
577 profile->ssid_index = i;
578 /* Support any cipher and auth algorithm */
579 profile->unicast_cipher = 0xff;
580 profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
581 IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
582 IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
583 profile->network_type = IWL_NETWORK_TYPE_ANY;
584 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
585 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
586 }
587
588 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
589
590 ret = iwl_mvm_send_cmd(mvm, &cmd);
591 kfree(profile_cfg_v1);
592 free_blocklist:
593 kfree(blocklist);
594
595 return ret;
596 }
597
598 static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
599 struct cfg80211_sched_scan_request *req)
600 {
601 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
602 IWL_DEBUG_SCAN(mvm,
603 "Sending scheduled scan with filtering, n_match_sets %d\n",
604 req->n_match_sets);
605 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
606 return false;
607 }
608
609 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
610
611 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
612 return true;
613 }
614
615 static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
616 {
617 int ret;
618 struct iwl_host_cmd cmd = {
619 .id = SCAN_OFFLOAD_ABORT_CMD,
620 };
621 u32 status = CAN_ABORT_STATUS;
622
623 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
624 if (ret)
625 return ret;
626
627 if (status != CAN_ABORT_STATUS) {
628 /*
629 * The scan abort will return 1 for success or
630 * 2 for "failure". A failure condition can be
631 * due to simply not being in an active scan which
632 * can occur if we send the scan abort before the
633 * microcode has notified us that a scan is completed.
634 */
635 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
636 ret = -ENOENT;
637 }
638
639 return ret;
640 }
641
642 static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
643 struct iwl_scan_req_tx_cmd *tx_cmd,
644 bool no_cck)
645 {
646 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
647 TX_CMD_FLG_BT_DIS);
648 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
649 NL80211_BAND_2GHZ,
650 no_cck);
651
652 if (!iwl_mvm_has_new_station_api(mvm->fw)) {
653 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
654 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
655
656 /*
657 * Fw doesn't use this sta anymore, pending deprecation via HOST API
658 * change
659 */
660 } else {
661 tx_cmd[0].sta_id = 0xff;
662 tx_cmd[1].sta_id = 0xff;
663 }
664
665 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
666 TX_CMD_FLG_BT_DIS);
667
668 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
669 NL80211_BAND_5GHZ,
670 no_cck);
671 }
672
673 static void
674 iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
675 struct ieee80211_channel **channels,
676 int n_channels, u32 ssid_bitmap,
677 struct iwl_scan_req_lmac *cmd)
678 {
679 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
680 int i;
681
682 for (i = 0; i < n_channels; i++) {
683 channel_cfg[i].channel_num =
684 cpu_to_le16(channels[i]->hw_value);
685 channel_cfg[i].iter_count = cpu_to_le16(1);
686 channel_cfg[i].iter_interval = 0;
687 channel_cfg[i].flags =
688 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
689 ssid_bitmap);
690 }
691 }
692
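/*
 * Copy the 2.4 GHz IEs into the probe request and, if the firmware is
 * to report DS params (RRM), insert a one-byte DS Parameter Set
 * placeholder right after the rate IEs; the channel number itself is
 * presumably filled in per channel by the firmware.
 */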
693 static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
694 size_t len, u8 *const pos)
695 {
696 static const u8 before_ds_params[] = {
697 WLAN_EID_SSID,
698 WLAN_EID_SUPP_RATES,
699 WLAN_EID_REQUEST,
700 WLAN_EID_EXT_SUPP_RATES,
701 };
702 size_t offs;
703 u8 *newpos = pos;
704
705 if (!iwl_mvm_rrm_scan_needed(mvm)) {
706 memcpy(newpos, ies, len);
707 return newpos + len;
708 }
709
710 offs = ieee80211_ie_split(ies, len,
711 before_ds_params,
712 ARRAY_SIZE(before_ds_params),
713 0);
714
715 memcpy(newpos, ies, offs);
716 newpos += offs;
717
718 /* Add a placeholder for DS Parameter Set element */
719 *newpos++ = WLAN_EID_DS_PARAMS;
720 *newpos++ = 1;
721 *newpos++ = 0;
722
723 memcpy(newpos, ies + offs, len - offs);
724 newpos += len - offs;
725
726 return newpos;
727 }
728
729 static void iwl_mvm_add_tpc_report_ie(u8 *pos)
730 {
731 pos[0] = WLAN_EID_VENDOR_SPECIFIC;
732 pos[1] = WFA_TPC_IE_LEN - 2;
733 pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
734 pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
735 pos[4] = WLAN_OUI_MICROSOFT & 0xff;
736 pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
737 pos[6] = 0;
738 /* pos[7] - tx power will be inserted by the FW */
739 pos[7] = 0;
740 pos[8] = 0;
741 }
742
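/*
 * Assemble the probe request template: 24-byte MAC header with a
 * zero-length SSID element, then the 2.4/5/6 GHz band IE blocks and the
 * common IEs. Offsets and lengths of each block are recorded so the
 * firmware can pick the right pieces per channel.
 */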
743 static void
744 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
745 struct ieee80211_scan_ies *ies,
746 struct iwl_mvm_scan_params *params)
747 {
748 struct ieee80211_mgmt *frame = (void *)params->preq.buf;
749 u8 *pos, *newpos;
750 const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
751 params->mac_addr : NULL;
752
753 /*
754 * Unfortunately, right now the offload scan doesn't support randomising
755 * within the firmware, so until the firmware API is ready we implement
756 * it in the driver. This means that the scan iterations won't really be
757 * random, only when it's restarted, but at least that helps a bit.
758 */
759 if (mac_addr)
760 get_random_mask_addr(frame->sa, mac_addr,
761 params->mac_addr_mask);
762 else
763 memcpy(frame->sa, vif->addr, ETH_ALEN);
764
765 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
766 eth_broadcast_addr(frame->da);
767 ether_addr_copy(frame->bssid, params->bssid);
768 frame->seq_ctrl = 0;
769
770 pos = frame->u.probe_req.variable;
771 *pos++ = WLAN_EID_SSID;
772 *pos++ = 0;
773
774 params->preq.mac_header.offset = 0;
775 params->preq.mac_header.len = cpu_to_le16(24 + 2);
776
777 /* Insert ds parameter set element on 2.4 GHz band */
778 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
779 ies->ies[NL80211_BAND_2GHZ],
780 ies->len[NL80211_BAND_2GHZ],
781 pos);
782 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
783 params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
784 pos = newpos;
785
786 memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
787 ies->len[NL80211_BAND_5GHZ]);
788 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
789 params->preq.band_data[1].len =
790 cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
791 pos += ies->len[NL80211_BAND_5GHZ];
792
793 memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
794 ies->len[NL80211_BAND_6GHZ]);
795 params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
796 params->preq.band_data[2].len =
797 cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
798 pos += ies->len[NL80211_BAND_6GHZ];
799 memcpy(pos, ies->common_ies, ies->common_ie_len);
800 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
801
802 if (iwl_mvm_rrm_scan_needed(mvm) &&
803 !fw_has_capa(&mvm->fw->ucode_capa,
804 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
805 iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
806 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
807 WFA_TPC_IE_LEN);
808 } else {
809 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
810 }
811 }
812
813 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
814 struct iwl_scan_req_lmac *cmd,
815 struct iwl_mvm_scan_params *params)
816 {
817 cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
818 cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
819 cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
820 cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
821 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
822 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
823 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
824 }
825
826 static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
827 struct ieee80211_scan_ies *ies,
828 int n_channels)
829 {
830 return ((n_ssids <= PROBE_OPTION_MAX) &&
831 (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
832 (ies->common_ie_len +
833 ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
834 ies->len[NL80211_BAND_6GHZ] <=
835 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
836 }
837
838 static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
839 struct ieee80211_vif *vif)
840 {
841 const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
842 bool low_latency;
843
844 if (iwl_mvm_is_cdb_supported(mvm))
845 low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
846 else
847 low_latency = iwl_mvm_low_latency(mvm);
848
849 /* We can only use EBS if:
850 * 1. the feature is supported;
851 * 2. the last EBS was successful;
852 * 3. if only single scan, the single scan EBS API is supported;
853 * 4. it's not a p2p find operation.
854 * 5. we are not in low latency mode,
855 * or if fragmented ebs is supported by the FW
856 */
857 return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
858 mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
859 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
860 (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
861 }
862
863 static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
864 {
865 return params->n_scan_plans == 1 &&
866 params->scan_plans[0].iterations == 1;
867 }
868
869 static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
870 {
871 return (type == IWL_SCAN_TYPE_FRAGMENTED ||
872 type == IWL_SCAN_TYPE_FAST_BALANCE);
873 }
874
875 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
876 struct iwl_mvm_scan_params *params,
877 struct ieee80211_vif *vif)
878 {
879 int flags = 0;
880
881 if (params->n_ssids == 0)
882 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
883
884 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
885 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
886
887 if (iwl_mvm_is_scan_fragmented(params->type))
888 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
889
890 if (iwl_mvm_rrm_scan_needed(mvm) &&
891 fw_has_capa(&mvm->fw->ucode_capa,
892 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
893 flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
894
895 if (params->pass_all)
896 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
897 else
898 flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
899
900 #ifdef CONFIG_IWLWIFI_DEBUGFS
901 if (mvm->scan_iter_notif_enabled)
902 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
903 #endif
904
905 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
906 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
907
908 if (iwl_mvm_is_regular_scan(params) &&
909 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
910 !iwl_mvm_is_scan_fragmented(params->type))
911 flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
912
913 return flags;
914 }
915
916 static void
917 iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req,
918 struct iwl_scan_probe_req *src_p_req)
919 {
920 int i;
921
922 p_req->mac_header = src_p_req->mac_header;
923 for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++)
924 p_req->band_data[i] = src_p_req->band_data[i];
925 p_req->common_data = src_p_req->common_data;
926 memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf));
927 }
928
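/*
 * Fill the LMAC scan command: dwell/timing, RX chain, per-band TX
 * commands, direct-scan SSIDs, schedule plans and the channel list.
 * The probe request template is placed right after the per-channel
 * configuration entries.
 */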
929 static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
930 struct iwl_mvm_scan_params *params)
931 {
932 struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
933 struct iwl_scan_probe_req_v1 *preq =
934 (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
935 mvm->fw->ucode_capa.n_scan_channels);
936 u32 ssid_bitmap = 0;
937 int i;
938 u8 band;
939
940 if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
941 return -EINVAL;
942
943 iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
944
945 cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
946 cmd->iter_num = cpu_to_le32(1);
947 cmd->n_channels = (u8)params->n_channels;
948
949 cmd->delay = cpu_to_le32(params->delay);
950
951 cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
952 vif));
953
954 band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
955 cmd->flags = cpu_to_le32(band);
956 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
957 MAC_FILTER_IN_BEACON);
958 iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
959 iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
960
961 /* this API uses bits 1-20 instead of 0-19 */
962 ssid_bitmap <<= 1;
963
964 for (i = 0; i < params->n_scan_plans; i++) {
965 struct cfg80211_sched_scan_plan *scan_plan =
966 &params->scan_plans[i];
967
968 cmd->schedule[i].delay =
969 cpu_to_le16(scan_plan->interval);
970 cmd->schedule[i].iterations = scan_plan->iterations;
971 cmd->schedule[i].full_scan_mul = 1;
972 }
973
974 /*
975 * If the number of iterations of the last scan plan is set to
976 * zero, it should run infinitely. However, this is not always the case.
977 * For example, when regular scan is requested the driver sets one scan
978 * plan with one iteration.
979 */
980 if (!cmd->schedule[i - 1].iterations)
981 cmd->schedule[i - 1].iterations = 0xff;
982
983 if (iwl_mvm_scan_use_ebs(mvm, vif)) {
984 cmd->channel_opt[0].flags =
985 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
986 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
987 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
988 cmd->channel_opt[0].non_ebs_ratio =
989 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
990 cmd->channel_opt[1].flags =
991 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
992 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
993 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
994 cmd->channel_opt[1].non_ebs_ratio =
995 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
996 }
997
998 iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
999 params->n_channels, ssid_bitmap, cmd);
1000
1001 iwl_mvm_scan_set_legacy_probe_req(preq, &params->preq);
1002
1003 return 0;
1004 }
1005
1006 static int rate_to_scan_rate_flag(unsigned int rate)
1007 {
1008 static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
1009 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
1010 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
1011 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
1012 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
1013 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
1014 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
1015 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
1016 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
1017 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
1018 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
1019 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
1020 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
1021 };
1022
1023 return rate_to_scan_rate[rate];
1024 }
1025
1026 static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
1027 {
1028 struct ieee80211_supported_band *band;
1029 unsigned int rates = 0;
1030 int i;
1031
1032 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1033 for (i = 0; i < band->n_bitrates; i++)
1034 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1035 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1036 for (i = 0; i < band->n_bitrates; i++)
1037 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1038
1039 /* Set both basic rates and supported rates */
1040 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
1041
1042 return cpu_to_le32(rates);
1043 }
1044
1045 static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
1046 struct iwl_scan_dwell *dwell)
1047 {
1048 dwell->active = IWL_SCAN_DWELL_ACTIVE;
1049 dwell->passive = IWL_SCAN_DWELL_PASSIVE;
1050 dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
1051 dwell->extended = IWL_SCAN_DWELL_EXTENDED;
1052 }
1053
1054 static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
1055 u32 max_channels)
1056 {
1057 struct ieee80211_supported_band *band;
1058 int i, j = 0;
1059
1060 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1061 for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1062 channels[j] = band->channels[i].hw_value;
1063 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1064 for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1065 channels[j] = band->channels[i].hw_value;
1066 }
1067
1068 static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
1069 u32 flags, u8 channel_flags,
1070 u32 max_channels)
1071 {
1072 enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
1073 struct iwl_scan_config_v1 *cfg = config;
1074
1075 cfg->flags = cpu_to_le32(flags);
1076 cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1077 cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1078 cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1079 cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
1080 cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
1081
1082 iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1083
1084 memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1085
1086 /* This function should not be called when using ADD_STA ver >=12 */
1087 WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
1088
1089 cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1090 cfg->channel_flags = channel_flags;
1091
1092 iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1093 }
1094
1095 static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
1096 u32 flags, u8 channel_flags,
1097 u32 max_channels)
1098 {
1099 struct iwl_scan_config_v2 *cfg = config;
1100
1101 cfg->flags = cpu_to_le32(flags);
1102 cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1103 cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1104 cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1105
1106 if (iwl_mvm_is_cdb_supported(mvm)) {
1107 enum iwl_mvm_scan_type lb_type, hb_type;
1108
1109 lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1110 NL80211_BAND_2GHZ);
1111 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1112 NL80211_BAND_5GHZ);
1113
1114 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1115 cpu_to_le32(scan_timing[lb_type].max_out_time);
1116 cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1117 cpu_to_le32(scan_timing[lb_type].suspend_time);
1118
1119 cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
1120 cpu_to_le32(scan_timing[hb_type].max_out_time);
1121 cfg->suspend_time[SCAN_HB_LMAC_IDX] =
1122 cpu_to_le32(scan_timing[hb_type].suspend_time);
1123 } else {
1124 enum iwl_mvm_scan_type type =
1125 iwl_mvm_get_scan_type(mvm, NULL);
1126
1127 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1128 cpu_to_le32(scan_timing[type].max_out_time);
1129 cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1130 cpu_to_le32(scan_timing[type].suspend_time);
1131 }
1132
1133 iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1134
1135 memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1136
1137 /* This function should not be called when using ADD_STA ver >=12 */
1138 WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
1139
1140 cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1141 cfg->channel_flags = channel_flags;
1142
1143 iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1144 }
1145
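/*
 * Send the legacy (full) SCAN_CFG_CMD: global timings, legacy rates,
 * MAC address and the complete channel list. Skipped when the scan
 * type(s) have not changed since the previous configuration.
 */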
1146 static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
1147 {
1148 void *cfg;
1149 int ret, cmd_size;
1150 struct iwl_host_cmd cmd = {
1151 .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
1152 };
1153 enum iwl_mvm_scan_type type;
1154 enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
1155 int num_channels =
1156 mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
1157 mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
1158 u32 flags;
1159 u8 channel_flags;
1160
1161 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
1162 num_channels = mvm->fw->ucode_capa.n_scan_channels;
1163
1164 if (iwl_mvm_is_cdb_supported(mvm)) {
1165 type = iwl_mvm_get_scan_type_band(mvm, NULL,
1166 NL80211_BAND_2GHZ);
1167 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1168 NL80211_BAND_5GHZ);
1169 if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
1170 return 0;
1171 } else {
1172 type = iwl_mvm_get_scan_type(mvm, NULL);
1173 if (type == mvm->scan_type)
1174 return 0;
1175 }
1176
1177 if (iwl_mvm_cdb_scan_api(mvm))
1178 cmd_size = sizeof(struct iwl_scan_config_v2);
1179 else
1180 cmd_size = sizeof(struct iwl_scan_config_v1);
1181 cmd_size += mvm->fw->ucode_capa.n_scan_channels;
1182
1183 cfg = kzalloc(cmd_size, GFP_KERNEL);
1184 if (!cfg)
1185 return -ENOMEM;
1186
1187 flags = SCAN_CONFIG_FLAG_ACTIVATE |
1188 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
1189 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
1190 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
1191 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
1192 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
1193 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
1194 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1195 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
1196 SCAN_CONFIG_N_CHANNELS(num_channels) |
1197 (iwl_mvm_is_scan_fragmented(type) ?
1198 SCAN_CONFIG_FLAG_SET_FRAGMENTED :
1199 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
1200
1201 channel_flags = IWL_CHANNEL_FLAG_EBS |
1202 IWL_CHANNEL_FLAG_ACCURATE_EBS |
1203 IWL_CHANNEL_FLAG_EBS_ADD |
1204 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
1205
1206 /*
1207 * Check for fragmented scan on LMAC2 - high band.
1208 * LMAC1 - low band is checked above.
1209 */
1210 if (iwl_mvm_cdb_scan_api(mvm)) {
1211 if (iwl_mvm_is_cdb_supported(mvm))
1212 flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
1213 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
1214 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
1215 iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
1216 num_channels);
1217 } else {
1218 iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
1219 num_channels);
1220 }
1221
1222 cmd.data[0] = cfg;
1223 cmd.len[0] = cmd_size;
1224 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1225
1226 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1227
1228 ret = iwl_mvm_send_cmd(mvm, &cmd);
1229 if (!ret) {
1230 mvm->scan_type = type;
1231 mvm->hb_scan_type = hb_type;
1232 }
1233
1234 kfree(cfg);
1235 return ret;
1236 }
1237
1238 int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1239 {
1240 struct iwl_scan_config cfg;
1241 struct iwl_host_cmd cmd = {
1242 .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
1243 .len[0] = sizeof(cfg),
1244 .data[0] = &cfg,
1245 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1246 };
1247
1248 if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
1249 return iwl_mvm_legacy_config_scan(mvm);
1250
1251 memset(&cfg, 0, sizeof(cfg));
1252
1253 if (!iwl_mvm_has_new_station_api(mvm->fw)) {
1254 cfg.bcast_sta_id = mvm->aux_sta.sta_id;
1255 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
1256 /*
1257 * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
1258 * version 5.
1259 */
1260 cfg.bcast_sta_id = 0xff;
1261 }
1262
1263 cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1264 cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1265
1266 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1267
1268 return iwl_mvm_send_cmd(mvm, &cmd);
1269 }
1270
1271 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
1272 {
1273 int i;
1274
1275 for (i = 0; i < mvm->max_scans; i++)
1276 if (mvm->scan_uid_status[i] == status)
1277 return i;
1278
1279 return -ENOENT;
1280 }
1281
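/*
 * Fill dwell and timing values in the layout matching the UMAC scan
 * command version in use: adaptive dwell (v7/v8/v9), CDB without
 * adaptive dwell (v6), or the original v1 layout.
 */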
1282 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1283 struct iwl_scan_req_umac *cmd,
1284 struct iwl_mvm_scan_params *params)
1285 {
1286 struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1287 u8 active_dwell, passive_dwell;
1288
1289 timing = &scan_timing[params->type];
1290 active_dwell = IWL_SCAN_DWELL_ACTIVE;
1291 passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1292
1293 if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
1294 cmd->v7.adwell_default_n_aps_social =
1295 IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1296 cmd->v7.adwell_default_n_aps =
1297 IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1298
1299 if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm))
1300 cmd->v9.adwell_default_hb_n_aps =
1301 IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1302
1303 /* if custom max budget was configured with debugfs */
1304 if (IWL_MVM_ADWELL_MAX_BUDGET)
1305 cmd->v7.adwell_max_budget =
1306 cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1307 else if (params->n_ssids && params->ssids[0].ssid_len)
1308 cmd->v7.adwell_max_budget =
1309 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1310 else
1311 cmd->v7.adwell_max_budget =
1312 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1313
1314 cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1315 cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
1316 cpu_to_le32(timing->max_out_time);
1317 cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
1318 cpu_to_le32(timing->suspend_time);
1319
1320 if (iwl_mvm_is_cdb_supported(mvm)) {
1321 hb_timing = &scan_timing[params->hb_type];
1322
1323 cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
1324 cpu_to_le32(hb_timing->max_out_time);
1325 cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
1326 cpu_to_le32(hb_timing->suspend_time);
1327 }
1328
1329 if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
1330 cmd->v7.active_dwell = active_dwell;
1331 cmd->v7.passive_dwell = passive_dwell;
1332 cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1333 } else {
1334 cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1335 cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1336 if (iwl_mvm_is_cdb_supported(mvm)) {
1337 cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
1338 active_dwell;
1339 cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
1340 passive_dwell;
1341 }
1342 }
1343 } else {
1344 cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
1345 cmd->v1.active_dwell = active_dwell;
1346 cmd->v1.passive_dwell = passive_dwell;
1347 cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1348
1349 if (iwl_mvm_is_cdb_supported(mvm)) {
1350 hb_timing = &scan_timing[params->hb_type];
1351
1352 cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
1353 cpu_to_le32(hb_timing->max_out_time);
1354 cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
1355 cpu_to_le32(hb_timing->suspend_time);
1356 }
1357
1358 if (iwl_mvm_cdb_scan_api(mvm)) {
1359 cmd->v6.scan_priority =
1360 cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1361 cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
1362 cpu_to_le32(timing->max_out_time);
1363 cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
1364 cpu_to_le32(timing->suspend_time);
1365 } else {
1366 cmd->v1.scan_priority =
1367 cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1368 cmd->v1.max_out_time =
1369 cpu_to_le32(timing->max_out_time);
1370 cmd->v1.suspend_time =
1371 cpu_to_le32(timing->suspend_time);
1372 }
1373 }
1374
1375 if (iwl_mvm_is_regular_scan(params))
1376 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1377 else
1378 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
1379 }
1380
1381 static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
1382 {
1383 return iwl_mvm_is_regular_scan(params) ?
1384 IWL_SCAN_PRIORITY_EXT_6 :
1385 IWL_SCAN_PRIORITY_EXT_2;
1386 }
1387
1388 static void
1389 iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
1390 struct iwl_scan_general_params_v11 *general_params,
1391 struct iwl_mvm_scan_params *params)
1392 {
1393 struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1394 u8 active_dwell, passive_dwell;
1395
1396 timing = &scan_timing[params->type];
1397 active_dwell = IWL_SCAN_DWELL_ACTIVE;
1398 passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1399
1400 general_params->adwell_default_social_chn =
1401 IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1402 general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1403 general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1404
1405 /* if custom max budget was configured with debugfs */
1406 if (IWL_MVM_ADWELL_MAX_BUDGET)
1407 general_params->adwell_max_budget =
1408 cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1409 else if (params->n_ssids && params->ssids[0].ssid_len)
1410 general_params->adwell_max_budget =
1411 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1412 else
1413 general_params->adwell_max_budget =
1414 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1415
1416 general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1417 general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
1418 cpu_to_le32(timing->max_out_time);
1419 general_params->suspend_time[SCAN_LB_LMAC_IDX] =
1420 cpu_to_le32(timing->suspend_time);
1421
1422 hb_timing = &scan_timing[params->hb_type];
1423
1424 general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
1425 cpu_to_le32(hb_timing->max_out_time);
1426 general_params->suspend_time[SCAN_HB_LMAC_IDX] =
1427 cpu_to_le32(hb_timing->suspend_time);
1428
1429 general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1430 general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1431 general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
1432 general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
1433 }
1434
1435 struct iwl_mvm_scan_channel_segment {
1436 u8 start_idx;
1437 u8 end_idx;
1438 u8 first_channel_id;
1439 u8 last_channel_id;
1440 u8 channel_spacing_shift;
1441 u8 band;
1442 };
1443
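/*
 * Mapping from (band, channel number) to a fixed index in the firmware
 * channel bitmaps: 2.4 GHz channels 1-14 take indices 0-13, the 5 GHz
 * channels (spaced 4 apart) take 14-50 and the 6 GHz channels take
 * 51-111.
 */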
1444 static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
1445 {
1446 .start_idx = 0,
1447 .end_idx = 13,
1448 .first_channel_id = 1,
1449 .last_channel_id = 14,
1450 .channel_spacing_shift = 0,
1451 .band = PHY_BAND_24
1452 },
1453 {
1454 .start_idx = 14,
1455 .end_idx = 41,
1456 .first_channel_id = 36,
1457 .last_channel_id = 144,
1458 .channel_spacing_shift = 2,
1459 .band = PHY_BAND_5
1460 },
1461 {
1462 .start_idx = 42,
1463 .end_idx = 50,
1464 .first_channel_id = 149,
1465 .last_channel_id = 181,
1466 .channel_spacing_shift = 2,
1467 .band = PHY_BAND_5
1468 },
1469 {
1470 .start_idx = 51,
1471 .end_idx = 111,
1472 .first_channel_id = 1,
1473 .last_channel_id = 241,
1474 .channel_spacing_shift = 2,
1475 .band = PHY_BAND_6
1476 },
1477 };
1478
1479 static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
1480 {
1481 int i, index;
1482
1483 if (!channel_id)
1484 return -EINVAL;
1485
1486 for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
1487 const struct iwl_mvm_scan_channel_segment *ch_segment =
1488 &scan_channel_segments[i];
1489 u32 ch_offset;
1490
1491 if (ch_segment->band != band ||
1492 ch_segment->first_channel_id > channel_id ||
1493 ch_segment->last_channel_id < channel_id)
1494 continue;
1495
1496 ch_offset = (channel_id - ch_segment->first_channel_id) >>
1497 ch_segment->channel_spacing_shift;
1498
1499 index = scan_channel_segments[i].start_idx + ch_offset;
1500 if (index < IWL_SCAN_NUM_CHANNELS)
1501 return index;
1502
1503 break;
1504 }
1505
1506 return -EINVAL;
1507 }
1508
1509 static const u8 p2p_go_friendly_chs[] = {
1510 36, 40, 44, 48, 149, 153, 157, 161, 165,
1511 };
1512
1513 static const u8 social_chs[] = {
1514 1, 6, 11
1515 };
1516
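/*
 * For P2P device scans, mark "GO friendly" channels in the adaptive
 * dwell override bitmap so the firmware applies the larger expected-APs
 * value on them (v4 channel params; newer versions use per-channel
 * flags instead, see iwl_mvm_scan_ch_n_aps_flag()).
 */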
1517 static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
1518 u8 ch_id, u8 band, u8 *ch_bitmap,
1519 size_t bitmap_n_entries)
1520 {
1521 int i;
1522
1523 if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1524 return;
1525
1526 for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1527 if (p2p_go_friendly_chs[i] == ch_id) {
1528 int ch_idx, bitmap_idx;
1529
1530 ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
1531 if (ch_idx < 0)
1532 return;
1533
1534 bitmap_idx = ch_idx / 8;
1535 if (bitmap_idx >= bitmap_n_entries)
1536 return;
1537
1538 ch_idx = ch_idx % 8;
1539 ch_bitmap[bitmap_idx] |= BIT(ch_idx);
1540
1541 return;
1542 }
1543 }
1544 }
1545
1546 static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
1547 {
1548 int i;
1549 u32 flags = 0;
1550
1551 if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1552 goto out;
1553
1554 for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1555 if (p2p_go_friendly_chs[i] == ch_id) {
1556 flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
1557 break;
1558 }
1559 }
1560
1561 if (flags)
1562 goto out;
1563
1564 for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
1565 if (social_chs[i] == ch_id) {
1566 flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
1567 break;
1568 }
1569 }
1570
1571 out:
1572 return flags;
1573 }
1574
1575 static void
1576 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1577 struct ieee80211_channel **channels,
1578 int n_channels, u32 flags,
1579 struct iwl_scan_channel_cfg_umac *channel_cfg)
1580 {
1581 int i;
1582
1583 for (i = 0; i < n_channels; i++) {
1584 channel_cfg[i].flags = cpu_to_le32(flags);
1585 channel_cfg[i].v1.channel_num = channels[i]->hw_value;
1586 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
1587 enum nl80211_band band = channels[i]->band;
1588
1589 channel_cfg[i].v2.band =
1590 iwl_mvm_phy_band_from_nl80211(band);
1591 channel_cfg[i].v2.iter_count = 1;
1592 channel_cfg[i].v2.iter_interval = 0;
1593 } else {
1594 channel_cfg[i].v1.iter_count = 1;
1595 channel_cfg[i].v1.iter_interval = 0;
1596 }
1597 }
1598 }
1599
1600 static void
1601 iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
1602 struct ieee80211_channel **channels,
1603 struct iwl_scan_channel_params_v4 *cp,
1604 int n_channels, u32 flags,
1605 enum nl80211_iftype vif_type)
1606 {
1607 u8 *bitmap = cp->adwell_ch_override_bitmap;
1608 size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
1609 int i;
1610
1611 for (i = 0; i < n_channels; i++) {
1612 enum nl80211_band band = channels[i]->band;
1613 struct iwl_scan_channel_cfg_umac *cfg =
1614 &cp->channel_config[i];
1615
1616 cfg->flags = cpu_to_le32(flags);
1617 cfg->v2.channel_num = channels[i]->hw_value;
1618 cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
1619 cfg->v2.iter_count = 1;
1620 cfg->v2.iter_interval = 0;
1621
1622 iwl_mvm_scan_ch_add_n_aps_override(vif_type,
1623 cfg->v2.channel_num,
1624 cfg->v2.band, bitmap,
1625 bitmap_n_entries);
1626 }
1627 }
1628
1629 static void
1630 iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
1631 struct ieee80211_channel **channels,
1632 struct iwl_scan_channel_params_v7 *cp,
1633 int n_channels, u32 flags,
1634 enum nl80211_iftype vif_type, u32 version)
1635 {
1636 int i;
1637
1638 for (i = 0; i < n_channels; i++) {
1639 enum nl80211_band band = channels[i]->band;
1640 struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
1641 u32 n_aps_flag =
1642 iwl_mvm_scan_ch_n_aps_flag(vif_type,
1643 channels[i]->hw_value);
1644 u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band);
1645
1646 cfg->flags = cpu_to_le32(flags | n_aps_flag);
1647 cfg->v2.channel_num = channels[i]->hw_value;
1648 if (cfg80211_channel_is_psc(channels[i]))
1649 cfg->flags = 0;
1650 cfg->v2.iter_count = 1;
1651 cfg->v2.iter_interval = 0;
1652 if (version < 17)
1653 cfg->v2.band = iwl_band;
1654 else
1655 cfg->flags |= cpu_to_le32((iwl_band <<
1656 IWL_CHAN_CFG_FLAGS_BAND_POS));
1657 }
1658 }
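/*
 * From command version 17 onwards the band is carried inside the flags word
 * (shifted to IWL_CHAN_CFG_FLAGS_BAND_POS) instead of the dedicated v2.band
 * field; the same pattern repeats in the 6 GHz variants below.
 */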
1659
1660 static void
1661 iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
1662 struct iwl_mvm_scan_params *params,
1663 struct iwl_scan_probe_params_v4 *pp)
1664 {
1665 int j, idex_s = 0, idex_b = 0;
1666 struct cfg80211_scan_6ghz_params *scan_6ghz_params =
1667 params->scan_6ghz_params;
1668 bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa,
1669 IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN);
1670
1671 for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
1672 j++) {
1673 if (!params->ssids[j].ssid_len)
1674 continue;
1675
1676 pp->short_ssid[idex_s] =
1677 cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
1678 params->ssids[j].ssid_len));
1679
1680 if (hidden_supported) {
1681 pp->direct_scan[idex_s].id = WLAN_EID_SSID;
1682 pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
1683 memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
1684 params->ssids[j].ssid_len);
1685 }
1686 idex_s++;
1687 }
1688
1689 /*
1690 * Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
1691 * collocated parameters. This might not be optimal, as this processing
1692 * does not (yet) correspond to the actual channels, so it is possible
1693 * that some entries would be left out.
1694 *
1695 * TODO: improve this logic.
1696 */
1697 for (j = 0; j < params->n_6ghz_params; j++) {
1698 int k;
1699
1700 /* First, try to place the short SSID */
1701 if (scan_6ghz_params[j].short_ssid_valid) {
1702 for (k = 0; k < idex_s; k++) {
1703 if (pp->short_ssid[k] ==
1704 cpu_to_le32(scan_6ghz_params[j].short_ssid))
1705 break;
1706 }
1707
1708 if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
1709 pp->short_ssid[idex_s++] =
1710 cpu_to_le32(scan_6ghz_params[j].short_ssid);
1711 }
1712 }
1713
1714 /* try to place BSSID for the same entry */
1715 for (k = 0; k < idex_b; k++) {
1716 if (!memcmp(&pp->bssid_array[k],
1717 scan_6ghz_params[j].bssid, ETH_ALEN))
1718 break;
1719 }
1720
1721 if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
1722 !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
1723 "scan: invalid BSSID at index %u, index_b=%u\n",
1724 j, idex_b)) {
1725 memcpy(&pp->bssid_array[idex_b++],
1726 scan_6ghz_params[j].bssid, ETH_ALEN);
1727 }
1728 }
1729
1730 pp->short_ssid_num = idex_s;
1731 pp->bssid_num = idex_b;
1732 }
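/*
 * The short SSID filled in above is the plain CRC-32 of the SSID bytes
 * (crc32_le() with initial and final inversion), matching how 6 GHz reduced
 * neighbor report entries identify a collocated AP's SSID. A minimal sketch
 * of the same computation:
 *
 *	__le32 short_ssid = cpu_to_le32(~crc32_le(~0, ssid, ssid_len));
 */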
1733
1734 /* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v7 */
1735 static u32
1736 iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
1737 struct iwl_mvm_scan_params *params,
1738 u32 n_channels,
1739 struct iwl_scan_probe_params_v4 *pp,
1740 struct iwl_scan_channel_params_v7 *cp,
1741 enum nl80211_iftype vif_type,
1742 u32 version)
1743 {
1744 int i;
1745 struct cfg80211_scan_6ghz_params *scan_6ghz_params =
1746 params->scan_6ghz_params;
1747 u32 ch_cnt;
1748
1749 for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
1750 struct iwl_scan_channel_cfg_umac *cfg =
1751 &cp->channel_config[ch_cnt];
1752
1753 u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
1754 u8 k, s_max = 0, b_max = 0, n_used_bssid_entries;
1755 u32 j;
1756 bool force_passive, found = false, allow_passive = true,
1757 unsolicited_probe_on_chan = false, psc_no_listen = false;
1758 s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
1759
1760 /*
1761 * Avoid performing passive scan on non PSC channels unless the
1762 * scan is specifically a passive scan, i.e., no SSIDs
1763 * configured in the scan command.
1764 */
1765 if (!cfg80211_channel_is_psc(params->channels[i]) &&
1766 !params->n_6ghz_params && params->n_ssids)
1767 continue;
1768
1769 cfg->v1.channel_num = params->channels[i]->hw_value;
1770 if (version < 17)
1771 cfg->v2.band = PHY_BAND_6;
1772 else
1773 cfg->flags |= cpu_to_le32(PHY_BAND_6 <<
1774 IWL_CHAN_CFG_FLAGS_BAND_POS);
1775
1776 cfg->v5.iter_count = 1;
1777 cfg->v5.iter_interval = 0;
1778
1779 /*
1780 * To optimize the scan time, i.e., reduce the scan dwell time
1781 * on each channel, the below logic tries to set 3 direct BSSID
1782 * probe requests for each broadcast probe request with a short
1783 * SSID.
1784 * TODO: improve this logic
1785 */
1786 n_used_bssid_entries = 3;
1787 for (j = 0; j < params->n_6ghz_params; j++) {
1788 s8 tmp_psd_20;
1789
1790 if (scan_6ghz_params[j].channel_idx != i)
1791 continue;
1792
1793 /* Use the highest PSD value allowed as advertised by
1794 * APs for this channel
1795 */
1796 tmp_psd_20 = scan_6ghz_params[j].psd_20;
1797 if (tmp_psd_20 !=
1798 IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
1799 (psd_20 ==
1800 IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
1801 psd_20 < tmp_psd_20))
1802 psd_20 = tmp_psd_20;
1803
1804 found = false;
1805 unsolicited_probe_on_chan |=
1806 scan_6ghz_params[j].unsolicited_probe;
1807 psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
1808
1809 for (k = 0; k < pp->short_ssid_num; k++) {
1810 if (!scan_6ghz_params[j].unsolicited_probe &&
1811 le32_to_cpu(pp->short_ssid[k]) ==
1812 scan_6ghz_params[j].short_ssid) {
1813 /* Relevant short SSID bit set */
1814 if (s_ssid_bitmap & BIT(k)) {
1815 found = true;
1816 break;
1817 }
1818
1819 /*
1820 * Use short SSID only to create a new
1821 * iteration during channel dwell or in
1822 * case that the short SSID has a
1823 * matching SSID, i.e., scan for hidden
1824 * APs.
1825 */
1826 if (n_used_bssid_entries >= 3) {
1827 s_ssid_bitmap |= BIT(k);
1828 s_max++;
1829 n_used_bssid_entries -= 3;
1830 found = true;
1831 break;
1832 } else if (pp->direct_scan[k].len) {
1833 s_ssid_bitmap |= BIT(k);
1834 s_max++;
1835 found = true;
1836 allow_passive = false;
1837 break;
1838 }
1839 }
1840 }
1841
1842 if (found)
1843 continue;
1844
1845 for (k = 0; k < pp->bssid_num; k++) {
1846 if (!memcmp(&pp->bssid_array[k],
1847 scan_6ghz_params[j].bssid,
1848 ETH_ALEN)) {
1849 if (!(bssid_bitmap & BIT(k))) {
1850 bssid_bitmap |= BIT(k);
1851 b_max++;
1852 n_used_bssid_entries++;
1853 }
1854 break;
1855 }
1856 }
1857 }
1858
1859 if (cfg80211_channel_is_psc(params->channels[i]) &&
1860 psc_no_listen)
1861 flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
1862
1863 if (unsolicited_probe_on_chan)
1864 flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
1865
1866 /*
1867 * In the following cases apply passive scan:
1868 * 1. Non fragmented scan:
1869 * - PSC channel with NO_LISTEN_FLAG on should be treated
1870 * like non PSC channel
1871 * - Non PSC channel with more than 3 short SSIDs or more
1872 * than 9 BSSIDs.
1873 * - Non PSC Channel with unsolicited probe response and
1874 * more than 2 short SSIDs or more than 6 BSSIDs.
1875 * - PSC channel with more than 2 short SSIDs or more than
1876 * 6 BSSIDs.
1877 * 2. Fragmented scan:
1878 * - PSC channel with more than 1 SSID or 3 BSSIDs.
1879 * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
1880 * - Non PSC channel with unsolicited probe response and
1881 * more than 1 SSID or more than 3 BSSIDs.
1882 */
1883 if (!iwl_mvm_is_scan_fragmented(params->type)) {
1884 if (!cfg80211_channel_is_psc(params->channels[i]) ||
1885 flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
1886 force_passive = (s_max > 3 || b_max > 9);
1887 force_passive |= (unsolicited_probe_on_chan &&
1888 (s_max > 2 || b_max > 6));
1889 } else {
1890 force_passive = (s_max > 2 || b_max > 6);
1891 }
1892 } else if (cfg80211_channel_is_psc(params->channels[i])) {
1893 force_passive = (s_max > 1 || b_max > 3);
1894 } else {
1895 force_passive = (s_max > 2 || b_max > 6);
1896 force_passive |= (unsolicited_probe_on_chan &&
1897 (s_max > 1 || b_max > 3));
1898 }
1899 if ((allow_passive && force_passive) ||
1900 (!(bssid_bitmap | s_ssid_bitmap) &&
1901 !cfg80211_channel_is_psc(params->channels[i])))
1902 flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
1903 else
1904 flags |= bssid_bitmap | (s_ssid_bitmap << 16);
1905
1906 cfg->flags |= cpu_to_le32(flags);
1907 if (version >= 17)
1908 cfg->v5.psd_20 = psd_20;
1909
1910 ch_cnt++;
1911 }
1912
1913 if (params->n_channels > ch_cnt)
1914 IWL_DEBUG_SCAN(mvm,
1915 "6GHz: reducing number channels: (%u->%u)\n",
1916 params->n_channels, ch_cnt);
1917
1918 return ch_cnt;
1919 }
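/*
 * For channels that end up actively scanned, the flags word packs the BSSID
 * selection bitmap in its low 16 bits and the short-SSID selection bitmap in
 * the high 16 bits (flags |= bssid_bitmap | (s_ssid_bitmap << 16)), telling
 * the firmware which of the probe_params BSSID/short-SSID entries to use on
 * that particular 6 GHz channel.
 */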
1920
1921 static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
1922 struct iwl_mvm_scan_params *params,
1923 struct ieee80211_vif *vif)
1924 {
1925 u8 flags = 0;
1926
1927 flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
1928
1929 if (iwl_mvm_scan_use_ebs(mvm, vif))
1930 flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
1931 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1932 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1933
1934 /* set fragmented ebs for fragmented scan on HB channels */
1935 if ((!iwl_mvm_is_cdb_supported(mvm) &&
1936 iwl_mvm_is_scan_fragmented(params->type)) ||
1937 (iwl_mvm_is_cdb_supported(mvm) &&
1938 iwl_mvm_is_scan_fragmented(params->hb_type)))
1939 flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
1940
1941 /*
1942 * Force EBS in case the scan is fragmented and there is a need to take P2P
1943 * GO operation into consideration during the scan operation.
1944 */
1945 if ((!iwl_mvm_is_cdb_supported(mvm) &&
1946 iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
1947 (iwl_mvm_is_cdb_supported(mvm) &&
1948 iwl_mvm_is_scan_fragmented(params->hb_type) &&
1949 params->respect_p2p_go_hb)) {
1950 IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
1951 flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
1952 }
1953
1954 return flags;
1955 }
1956
1957 static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
1958 struct iwl_mvm_scan_params *params,
1959 struct ieee80211_vif *vif)
1960 {
1961 struct ieee80211_supported_band *sband =
1962 &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
1963 u32 n_disabled, i;
1964
1965 params->enable_6ghz_passive = false;
1966
1967 if (params->scan_6ghz)
1968 return;
1969
1970 if (!fw_has_capa(&mvm->fw->ucode_capa,
1971 IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) {
1972 IWL_DEBUG_SCAN(mvm,
1973 "6GHz passive scan: Not supported by FW\n");
1974 return;
1975 }
1976
1977 /* 6GHz passive scan allowed only on station interface */
1978 if (vif->type != NL80211_IFTYPE_STATION) {
1979 IWL_DEBUG_SCAN(mvm,
1980 "6GHz passive scan: not station interface\n");
1981 return;
1982 }
1983
1984 /*
1985 * 6GHz passive scan is allowed in a defined time interval following HW
1986 * reset or resume flow, or while not associated and a large interval
1987 * has passed since the last 6GHz passive scan.
1988 */
1989 if ((vif->cfg.assoc ||
1990 time_after(mvm->last_6ghz_passive_scan_jiffies +
1991 (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
1992 (time_before(mvm->last_reset_or_resume_time_jiffies +
1993 (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
1994 jiffies))) {
1995 IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
1996 vif->cfg.assoc ? "associated" :
1997 "timeout did not expire");
1998 return;
1999 }
2000
2001 /* not enough channels in the regular scan request */
2002 if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
2003 IWL_DEBUG_SCAN(mvm,
2004 "6GHz passive scan: not enough channels\n");
2005 return;
2006 }
2007
2008 for (i = 0; i < params->n_ssids; i++) {
2009 if (!params->ssids[i].ssid_len)
2010 break;
2011 }
2012
2013 /* not a wildcard scan, so cannot enable passive 6GHz scan */
2014 if (i == params->n_ssids) {
2015 IWL_DEBUG_SCAN(mvm,
2016 "6GHz passive scan: no wildcard SSID\n");
2017 return;
2018 }
2019
2020 if (!sband || !sband->n_channels) {
2021 IWL_DEBUG_SCAN(mvm,
2022 "6GHz passive scan: no 6GHz channels\n");
2023 return;
2024 }
2025
2026 for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
2027 if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
2028 n_disabled++;
2029 }
2030
2031 /*
2032 * Not all the 6GHz channels are disabled, so no need for 6GHz passive
2033 * scan
2034 */
2035 if (n_disabled != sband->n_channels) {
2036 IWL_DEBUG_SCAN(mvm,
2037 "6GHz passive scan: 6GHz channels enabled\n");
2038 return;
2039 }
2040
2041 /* all conditions to enable 6ghz passive scan are satisfied */
2042 IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n");
2043 params->enable_6ghz_passive = true;
2044 }
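/*
 * Summing up the checks above: 6 GHz passive scan piggybacks on a regular
 * wildcard scan on a station interface, and only when the firmware supports
 * it, enough 2.4/5 GHz channels are requested, the assoc/rate-limit timers
 * allow it, and every 6 GHz channel is still disabled (typically because no
 * 6 GHz AP has been discovered yet), so passive listening is the only way to
 * find 6 GHz APs.
 */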
2045
2046 static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
2047 struct iwl_mvm_scan_params *params,
2048 struct ieee80211_vif *vif,
2049 int type)
2050 {
2051 u16 flags = 0;
2052
2053 /*
2054 * If no direct SSIDs are provided perform a passive scan. Otherwise,
2055 * if there is a single SSID which is not the broadcast SSID, assume
2056 * that the scan is intended for roaming purposes and thus enable Rx on
2057 * all chains to improve chances of hearing the beacons/probe responses.
2058 */
2059 if (params->n_ssids == 0)
2060 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
2061 else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
2062 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
2063
2064 if (iwl_mvm_is_scan_fragmented(params->type))
2065 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
2066
2067 if (iwl_mvm_is_scan_fragmented(params->hb_type))
2068 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
2069
2070 if (params->pass_all)
2071 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
2072 else
2073 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
2074
2075 if (!iwl_mvm_is_regular_scan(params))
2076 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
2077
2078 if (params->iter_notif ||
2079 mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
2080 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
2081
2082 if (IWL_MVM_ADWELL_ENABLE)
2083 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
2084
2085 if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
2086 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
2087
2088 if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2089 params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
2090 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
2091
2092 if (params->enable_6ghz_passive)
2093 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
2094
2095 if (iwl_mvm_is_oce_supported(mvm) &&
2096 (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
2097 NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
2098 NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
2099 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
2100
2101 return flags;
2102 }
2103
2104 static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
2105 struct iwl_mvm_scan_params *params,
2106 struct ieee80211_vif *vif, int type)
2107 {
2108 u8 flags = 0;
2109
2110 if (iwl_mvm_is_cdb_supported(mvm)) {
2111 if (params->respect_p2p_go)
2112 flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
2113 if (params->respect_p2p_go_hb)
2114 flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
2115 } else {
2116 if (params->respect_p2p_go)
2117 flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
2118 IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
2119 }
2120
2121 if (params->scan_6ghz &&
2122 fw_has_capa(&mvm->fw->ucode_capa,
2123 IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
2124 flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
2125
2126 return flags;
2127 }
2128
2129 static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
2130 struct iwl_mvm_scan_params *params,
2131 struct ieee80211_vif *vif)
2132 {
2133 u16 flags = 0;
2134
2135 if (params->n_ssids == 0)
2136 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
2137
2138 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
2139 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
2140
2141 if (iwl_mvm_is_scan_fragmented(params->type))
2142 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
2143
2144 if (iwl_mvm_is_cdb_supported(mvm) &&
2145 iwl_mvm_is_scan_fragmented(params->hb_type))
2146 flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
2147
2148 if (iwl_mvm_rrm_scan_needed(mvm) &&
2149 fw_has_capa(&mvm->fw->ucode_capa,
2150 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
2151 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
2152
2153 if (params->pass_all)
2154 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
2155 else
2156 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
2157
2158 if (!iwl_mvm_is_regular_scan(params))
2159 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
2160
2161 if (params->iter_notif)
2162 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2163
2164 #ifdef CONFIG_IWLWIFI_DEBUGFS
2165 if (mvm->scan_iter_notif_enabled)
2166 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2167 #endif
2168
2169 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
2170 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2171
2172 if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
2173 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
2174
2175 /*
2176 * Extended dwell is relevant only for low band to start with, as it is
2177 * being used for social channels only (1, 6, 11), so we can check
2178 * only the low band scan type, also for CDB.
2179 */
2180 if (iwl_mvm_is_regular_scan(params) &&
2181 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
2182 !iwl_mvm_is_scan_fragmented(params->type) &&
2183 !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
2184 !iwl_mvm_is_oce_supported(mvm))
2185 flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
2186
2187 if (iwl_mvm_is_oce_supported(mvm)) {
2188 if ((params->flags &
2189 NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
2190 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
2191 /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
2192 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
2193 * the same bit, we need to make sure that we use this bit here
2194 * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be
2195 * used. */
2196 if ((params->flags &
2197 NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
2198 !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
2199 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
2200 if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
2201 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
2202 }
2203
2204 return flags;
2205 }
2206
2207 static int
2208 iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
2209 struct iwl_scan_umac_schedule *schedule,
2210 __le16 *delay)
2211 {
2212 int i;
2213 if (WARN_ON(!params->n_scan_plans ||
2214 params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
2215 return -EINVAL;
2216
2217 for (i = 0; i < params->n_scan_plans; i++) {
2218 struct cfg80211_sched_scan_plan *scan_plan =
2219 &params->scan_plans[i];
2220
2221 schedule[i].iter_count = scan_plan->iterations;
2222 schedule[i].interval =
2223 cpu_to_le16(scan_plan->interval);
2224 }
2225
2226 /*
2227 * If the number of iterations of the last scan plan is set to
2228 * zero, it should run infinitely. However, this is not always the case.
2229 * For example, when regular scan is requested the driver sets one scan
2230 * plan with one iteration.
2231 */
2232 if (!schedule[params->n_scan_plans - 1].iter_count)
2233 schedule[params->n_scan_plans - 1].iter_count = 0xff;
2234
2235 *delay = cpu_to_le16(params->delay);
2236
2237 return 0;
2238 }
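/*
 * For example, a regular scan (see iwl_mvm_reg_scan_start() below) passes a
 * single plan with .iterations = 1, so schedule[0].iter_count stays 1. A
 * scheduled scan whose last plan has zero iterations gets iter_count = 0xff,
 * which is the closest the firmware interface gets to "run indefinitely".
 */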
2239
2240 static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2241 struct iwl_mvm_scan_params *params,
2242 int type, int uid)
2243 {
2244 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
2245 struct iwl_scan_umac_chan_param *chan_param;
2246 void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
2247 void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
2248 mvm->fw->ucode_capa.n_scan_channels;
2249 struct iwl_scan_req_umac_tail_v2 *tail_v2 =
2250 (struct iwl_scan_req_umac_tail_v2 *)sec_part;
2251 struct iwl_scan_req_umac_tail_v1 *tail_v1;
2252 struct iwl_ssid_ie *direct_scan;
2253 int ret = 0;
2254 u32 ssid_bitmap = 0;
2255 u8 channel_flags = 0;
2256 u16 gen_flags;
2257 struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
2258
2259 chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
2260
2261 iwl_mvm_scan_umac_dwell(mvm, cmd, params);
2262
2263 mvm->scan_uid_status[uid] = type;
2264
2265 cmd->uid = cpu_to_le32(uid);
2266 gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
2267 cmd->general_flags = cpu_to_le16(gen_flags);
2268 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
2269 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
2270 cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
2271 IWL_SCAN_NUM_OF_FRAGS;
2272 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
2273 cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
2274 IWL_SCAN_NUM_OF_FRAGS;
2275
2276 cmd->v8.general_flags2 =
2277 IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
2278 }
2279
2280 cmd->scan_start_mac_id = scan_vif->id;
2281
2282 if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
2283 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
2284
2285 if (iwl_mvm_scan_use_ebs(mvm, vif)) {
2286 channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
2287 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
2288 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
2289
2290 /* set fragmented ebs for fragmented scan on HB channels */
2291 if (iwl_mvm_is_frag_ebs_supported(mvm)) {
2292 if (gen_flags &
2293 IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
2294 (!iwl_mvm_is_cdb_supported(mvm) &&
2295 gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
2296 channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
2297 }
2298 }
2299
2300 chan_param->flags = channel_flags;
2301 chan_param->count = params->n_channels;
2302
2303 ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
2304 &tail_v2->delay);
2305 if (ret) {
2306 mvm->scan_uid_status[uid] = 0;
2307 return ret;
2308 }
2309
2310 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
2311 tail_v2->preq = params->preq;
2312 direct_scan = tail_v2->direct_scan;
2313 } else {
2314 tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part;
2315 iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq,
2316 &params->preq);
2317 direct_scan = tail_v1->direct_scan;
2318 }
2319 iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap);
2320 iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
2321 params->n_channels, ssid_bitmap,
2322 cmd_data);
2323 return 0;
2324 }
2325
2326 static void
2327 iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
2328 struct iwl_mvm_scan_params *params,
2329 struct ieee80211_vif *vif,
2330 struct iwl_scan_general_params_v11 *gp,
2331 u16 gen_flags, u8 gen_flags2,
2332 u32 version)
2333 {
2334 struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
2335
2336 iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
2337
2338 IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n",
2339 gen_flags, gen_flags2);
2340
2341 gp->flags = cpu_to_le16(gen_flags);
2342 gp->flags2 = gen_flags2;
2343
2344 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
2345 gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
2346 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
2347 gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
2348
2349 if (version < 16) {
2350 gp->scan_start_mac_or_link_id = scan_vif->id;
2351 } else {
2352 struct iwl_mvm_vif_link_info *link_info;
2353 u8 link_id = 0;
2354
2355 /* Use one of the active links (if any). In the future, the link
2356 * ID might be part of the scan request coming from the upper
2357 * layers, in which case we would need to use it.
2358 */
2359 if (vif->active_links)
2360 link_id = ffs(vif->active_links) - 1;
2361
2362 link_info = scan_vif->link[link_id];
2363 if (!WARN_ON(!link_info))
2364 gp->scan_start_mac_or_link_id = link_info->fw_link_id;
2365 }
2366 }
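/*
 * ffs(vif->active_links) - 1 picks the lowest-numbered active link: e.g.
 * active_links == 0x6 (links 1 and 2 active) selects link 1, whose
 * fw_link_id is then reported as the scan originator for version >= 16.
 */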
2367
2368 static void
2369 iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
2370 struct iwl_scan_probe_params_v3 *pp)
2371 {
2372 pp->preq = params->preq;
2373 pp->ssid_num = params->n_ssids;
2374 iwl_scan_build_ssids(params, pp->direct_scan, NULL);
2375 }
2376
2377 static void
2378 iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
2379 struct iwl_scan_probe_params_v4 *pp,
2380 u32 *bitmap_ssid)
2381 {
2382 pp->preq = params->preq;
2383 iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
2384 }
2385
2386 static void
2387 iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
2388 struct iwl_mvm_scan_params *params,
2389 struct ieee80211_vif *vif,
2390 struct iwl_scan_channel_params_v4 *cp,
2391 u32 channel_cfg_flags)
2392 {
2393 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2394 cp->count = params->n_channels;
2395 cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2396
2397 iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
2398 params->n_channels,
2399 channel_cfg_flags,
2400 vif->type);
2401 }
2402
2403 static void
2404 iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm,
2405 struct iwl_mvm_scan_params *params,
2406 struct ieee80211_vif *vif,
2407 struct iwl_scan_channel_params_v7 *cp,
2408 u32 channel_cfg_flags,
2409 u32 version)
2410 {
2411 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2412 cp->count = params->n_channels;
2413 cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2414 cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2415
2416 iwl_mvm_umac_scan_cfg_channels_v7(mvm, params->channels, cp,
2417 params->n_channels,
2418 channel_cfg_flags,
2419 vif->type, version);
2420
2421 if (params->enable_6ghz_passive) {
2422 struct ieee80211_supported_band *sband =
2423 &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
2424 u32 i;
2425
2426 for (i = 0; i < sband->n_channels; i++) {
2427 struct ieee80211_channel *channel =
2428 &sband->channels[i];
2429
2430 struct iwl_scan_channel_cfg_umac *cfg =
2431 &cp->channel_config[cp->count];
2432
2433 if (!cfg80211_channel_is_psc(channel))
2434 continue;
2435
2436 cfg->v5.channel_num = channel->hw_value;
2437 cfg->v5.iter_count = 1;
2438 cfg->v5.iter_interval = 0;
2439
2440 if (version < 17) {
2441 cfg->flags = 0;
2442 cfg->v2.band = PHY_BAND_6;
2443 } else {
2444 cfg->flags = cpu_to_le32(PHY_BAND_6 <<
2445 IWL_CHAN_CFG_FLAGS_BAND_POS);
2446 cfg->v5.psd_20 =
2447 IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
2448 }
2449 cp->count++;
2450 }
2451 }
2452 }
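/*
 * Note that the 6 GHz PSC channels appended for passive scan are added on
 * top of params->n_channels, so cp->count can end up larger than the number
 * of channels in the original request.
 */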
2453
2454 static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2455 struct iwl_mvm_scan_params *params, int type,
2456 int uid)
2457 {
2458 struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
2459 struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
2460 int ret;
2461 u16 gen_flags;
2462
2463 mvm->scan_uid_status[uid] = type;
2464
2465 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2466 cmd->uid = cpu_to_le32(uid);
2467
2468 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2469 iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
2470 &scan_p->general_params,
2471 gen_flags, 0, 12);
2472
2473 ret = iwl_mvm_fill_scan_sched_params(params,
2474 scan_p->periodic_params.schedule,
2475 &scan_p->periodic_params.delay);
2476 if (ret)
2477 return ret;
2478
2479 iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
2480 iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
2481 &scan_p->channel_params, 0);
2482
2483 return 0;
2484 }
2485
2486 static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
2487 struct ieee80211_vif *vif,
2488 struct iwl_mvm_scan_params *params,
2489 int type, int uid, u32 version)
2490 {
2491 struct iwl_scan_req_umac_v17 *cmd = mvm->scan_cmd;
2492 struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
2493 struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
2494 struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
2495 int ret;
2496 u16 gen_flags;
2497 u8 gen_flags2;
2498 u32 bitmap_ssid = 0;
2499
2500 mvm->scan_uid_status[uid] = type;
2501
2502 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2503 cmd->uid = cpu_to_le32(uid);
2504
2505 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2506
2507 if (version >= 15)
2508 gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
2509 else
2510 gen_flags2 = 0;
2511
2512 iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
2513 &scan_p->general_params,
2514 gen_flags, gen_flags2, version);
2515
2516 ret = iwl_mvm_fill_scan_sched_params(params,
2517 scan_p->periodic_params.schedule,
2518 &scan_p->periodic_params.delay);
2519 if (ret)
2520 return ret;
2521
2522 if (!params->scan_6ghz) {
2523 iwl_mvm_scan_umac_fill_probe_p_v4(params,
2524 &scan_p->probe_params,
2525 &bitmap_ssid);
2526 iwl_mvm_scan_umac_fill_ch_p_v7(mvm, params, vif,
2527 &scan_p->channel_params,
2528 bitmap_ssid,
2529 version);
2530 return 0;
2531 } else {
2532 pb->preq = params->preq;
2533 }
2534
2535 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2536 cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2537 cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2538
2539 iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
2540
2541 cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params,
2542 params->n_channels,
2543 pb, cp, vif->type,
2544 version);
2545 if (!cp->count) {
2546 mvm->scan_uid_status[uid] = 0;
2547 return -EINVAL;
2548 }
2549
2550 if (!params->n_ssids ||
2551 (params->n_ssids == 1 && !params->ssids[0].ssid_len))
2552 cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
2553
2554 return 0;
2555 }
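/*
 * The final check treats "no SSIDs" or "a single zero-length SSID" as a
 * wildcard request and sets IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER, which
 * presumably tells the firmware not to filter results on PSC channels by
 * SSID.
 */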
2556
2557 static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2558 struct iwl_mvm_scan_params *params, int type,
2559 int uid)
2560 {
2561 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14);
2562 }
2563
2564 static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2565 struct iwl_mvm_scan_params *params, int type,
2566 int uid)
2567 {
2568 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15);
2569 }
2570
2571 static int iwl_mvm_scan_umac_v16(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2572 struct iwl_mvm_scan_params *params, int type,
2573 int uid)
2574 {
2575 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 16);
2576 }
2577
2578 static int iwl_mvm_scan_umac_v17(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2579 struct iwl_mvm_scan_params *params, int type,
2580 int uid)
2581 {
2582 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 17);
2583 }
2584
2585 static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
2586 {
2587 return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
2588 }
2589
2590 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
2591 {
2592 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2593 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2594
2595 /* This looks a bit arbitrary, but the idea is that if we run
2596 * out of possible simultaneous scans and the userspace is
2597 * trying to run a scan type that is already running, we
2598 * return -EBUSY. But if the userspace wants to start a
2599 * different type of scan, we stop the opposite type to make
2600 * space for the new request. The reason is backwards
2601 * compatibility with old wpa_supplicant that wouldn't stop a
2602 * scheduled scan before starting a normal scan.
2603 */
2604
2605 /* FW supports only a single periodic scan */
2606 if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2607 mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT))
2608 return -EBUSY;
2609
2610 if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
2611 return 0;
2612
2613 /* Use a switch, even though this is a bitmask, so that more
2614 * than one bit set will fall into the default case and we will warn.
2615 */
2616 switch (type) {
2617 case IWL_MVM_SCAN_REGULAR:
2618 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2619 return -EBUSY;
2620 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2621 case IWL_MVM_SCAN_SCHED:
2622 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2623 return -EBUSY;
2624 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2625 case IWL_MVM_SCAN_NETDETECT:
2626 /* For non-unified images, there's no need to stop
2627 * anything for net-detect since the firmware is
2628 * restarted anyway. This way, any sched scans that
2629 * were running will be restarted when we resume.
2630 */
2631 if (!unified_image)
2632 return 0;
2633
2634 /* If this is a unified image and we ran out of scans,
2635 * we need to stop something. Prefer stopping regular
2636 * scans, because the results are useless at this
2637 * point, and we should be able to keep running
2638 * another scheduled scan while suspended.
2639 */
2640 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2641 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
2642 true);
2643 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2644 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
2645 true);
2646 /* Something is wrong if no scan was running but we
2647 * ran out of scans.
2648 */
2649 fallthrough;
2650 default:
2651 WARN_ON(1);
2652 break;
2653 }
2654
2655 return -EIO;
2656 }
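/*
 * For example, on firmware that supports only one concurrent scan, a regular
 * scan request arriving while a scheduled scan is running does not fail:
 * the scheduled scan is stopped first (see the wpa_supplicant note above)
 * and the regular scan takes its slot.
 */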
2657
2658 #define SCAN_TIMEOUT 30000
2659
2660 void iwl_mvm_scan_timeout_wk(struct work_struct *work)
2661 {
2662 struct delayed_work *delayed_work = to_delayed_work(work);
2663 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
2664 scan_timeout_dwork);
2665
2666 IWL_ERR(mvm, "regular scan timed out\n");
2667
2668 iwl_force_nmi(mvm->trans);
2669 }
2670
2671 static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
2672 struct iwl_mvm_scan_params *params,
2673 struct ieee80211_vif *vif)
2674 {
2675 if (iwl_mvm_is_cdb_supported(mvm)) {
2676 params->type =
2677 iwl_mvm_get_scan_type_band(mvm, vif,
2678 NL80211_BAND_2GHZ);
2679 params->hb_type =
2680 iwl_mvm_get_scan_type_band(mvm, vif,
2681 NL80211_BAND_5GHZ);
2682 } else {
2683 params->type = iwl_mvm_get_scan_type(mvm, vif);
2684 }
2685 }
2686
2687 struct iwl_scan_umac_handler {
2688 u8 version;
2689 int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2690 struct iwl_mvm_scan_params *params, int type, int uid);
2691 };
2692
2693 #define IWL_SCAN_UMAC_HANDLER(_ver) { \
2694 .version = _ver, \
2695 .handler = iwl_mvm_scan_umac_v##_ver, \
2696 }
2697
2698 static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
2699 /* set the newest version first to shorten the list traversal time */
2700 IWL_SCAN_UMAC_HANDLER(17),
2701 IWL_SCAN_UMAC_HANDLER(16),
2702 IWL_SCAN_UMAC_HANDLER(15),
2703 IWL_SCAN_UMAC_HANDLER(14),
2704 IWL_SCAN_UMAC_HANDLER(12),
2705 };
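/*
 * iwl_mvm_build_scan_cmd() below walks this table looking for an exact match
 * on the SCAN_REQ_UMAC version reported by the firmware; any version not
 * listed here (i.e. older firmware) falls back to the legacy
 * iwl_mvm_scan_umac() layout.
 */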
2706
2707 static void iwl_mvm_mei_scan_work(struct work_struct *wk)
2708 {
2709 struct iwl_mei_scan_filter *scan_filter =
2710 container_of(wk, struct iwl_mei_scan_filter, scan_work);
2711 struct iwl_mvm *mvm =
2712 container_of(scan_filter, struct iwl_mvm, mei_scan_filter);
2713 struct iwl_mvm_csme_conn_info *info;
2714 struct sk_buff *skb;
2715 u8 bssid[ETH_ALEN];
2716
2717 mutex_lock(&mvm->mutex);
2718 info = iwl_mvm_get_csme_conn_info(mvm);
2719 memcpy(bssid, info->conn_info.bssid, ETH_ALEN);
2720 mutex_unlock(&mvm->mutex);
2721
2722 while ((skb = skb_dequeue(&scan_filter->scan_res))) {
2723 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2724
2725 if (!memcmp(mgmt->bssid, bssid, ETH_ALEN))
2726 ieee80211_rx_irqsafe(mvm->hw, skb);
2727 else
2728 kfree_skb(skb);
2729 }
2730 }
2731
2732 void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter)
2733 {
2734 skb_queue_head_init(&mei_scan_filter->scan_res);
2735 INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work);
2736 }
2737
2738 /* In case CSME is connected and has link protection set, this function will
2739 * override the scan request to scan only the associated channel and only for
2740 * the associated SSID.
2741 */
2742 static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm,
2743 struct iwl_mvm_scan_params *params)
2744 {
2745 struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm);
2746 struct iwl_mei_conn_info *conn_info;
2747 struct ieee80211_channel *chan;
2748 int scan_iters, i;
2749
2750 if (!info) {
2751 IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n");
2752 return;
2753 }
2754
2755 conn_info = &info->conn_info;
2756 if (!info->conn_info.lp_state || !info->conn_info.ssid_len)
2757 return;
2758
2759 if (!params->n_channels || !params->n_ssids)
2760 return;
2761
2762 mvm->mei_scan_filter.is_mei_limited_scan = true;
2763
2764 chan = ieee80211_get_channel(mvm->hw->wiphy,
2765 ieee80211_channel_to_frequency(conn_info->channel,
2766 conn_info->band));
2767 if (!chan) {
2768 IWL_DEBUG_SCAN(mvm,
2769 "Failed to get CSME channel (chan=%u band=%u)\n",
2770 conn_info->channel, conn_info->band);
2771 return;
2772 }
2773
2774 /* The mei filtered scan must find the AP, otherwise CSME will
2775 * take ownership of the NIC. Add several iterations on the channel to
2776 * make the scan more robust.
2777 */
2778 scan_iters = min(IWL_MEI_SCAN_NUM_ITER, params->n_channels);
2779 params->n_channels = scan_iters;
2780 for (i = 0; i < scan_iters; i++)
2781 params->channels[i] = chan;
2782
2783 IWL_DEBUG_SCAN(mvm, "Mei scan: num iterations=%u\n", scan_iters);
2784
2785 params->n_ssids = 1;
2786 params->ssids[0].ssid_len = conn_info->ssid_len;
2787 memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len);
2788 }
2789
2790 static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
2791 struct ieee80211_vif *vif,
2792 struct iwl_host_cmd *hcmd,
2793 struct iwl_mvm_scan_params *params,
2794 int type)
2795 {
2796 int uid, i, err;
2797 u8 scan_ver;
2798
2799 lockdep_assert_held(&mvm->mutex);
2800 memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
2801
2802 iwl_mvm_mei_limited_scan(mvm, params);
2803
2804 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2805 hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
2806
2807 return iwl_mvm_scan_lmac(mvm, vif, params);
2808 }
2809
2810 uid = iwl_mvm_scan_uid_by_status(mvm, 0);
2811 if (uid < 0)
2812 return uid;
2813
2814 hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
2815
2816 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
2817 IWL_FW_CMD_VER_UNKNOWN);
2818
2819 for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
2820 const struct iwl_scan_umac_handler *ver_handler =
2821 &iwl_scan_umac_handlers[i];
2822
2823 if (ver_handler->version != scan_ver)
2824 continue;
2825
2826 err = ver_handler->handler(mvm, vif, params, type, uid);
2827 return err ? : uid;
2828 }
2829
2830 err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
2831 if (err)
2832 return err;
2833
2834 return uid;
2835 }
2836
2837 struct iwl_mvm_scan_respect_p2p_go_iter_data {
2838 struct ieee80211_vif *current_vif;
2839 bool p2p_go;
2840 enum nl80211_band band;
2841 };
2842
2843 static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
2844 struct ieee80211_vif *vif)
2845 {
2846 struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
2847 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2848
2849 /* exclude the given vif */
2850 if (vif == data->current_vif)
2851 return;
2852
2853 if (vif->type == NL80211_IFTYPE_AP && vif->p2p) {
2854 u32 link_id;
2855
2856 for (link_id = 0;
2857 link_id < ARRAY_SIZE(mvmvif->link);
2858 link_id++) {
2859 struct iwl_mvm_vif_link_info *link =
2860 mvmvif->link[link_id];
2861
2862 if (link && link->phy_ctxt->id < NUM_PHY_CTX &&
2863 (data->band == NUM_NL80211_BANDS ||
2864 link->phy_ctxt->channel->band == data->band)) {
2865 data->p2p_go = true;
2866 break;
2867 }
2868 }
2869 }
2870 }
2871
2872 static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
2873 struct ieee80211_vif *vif,
2874 bool low_latency,
2875 enum nl80211_band band)
2876 {
2877 struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
2878 .current_vif = vif,
2879 .p2p_go = false,
2880 .band = band,
2881 };
2882
2883 if (!low_latency)
2884 return false;
2885
2886 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
2887 IEEE80211_IFACE_ITER_NORMAL,
2888 iwl_mvm_scan_respect_p2p_go_iter,
2889 &data);
2890
2891 return data.p2p_go;
2892 }
2893
2894 static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
2895 struct ieee80211_vif *vif,
2896 enum nl80211_band band)
2897 {
2898 bool low_latency = iwl_mvm_low_latency_band(mvm, band);
2899
2900 return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
2901 }
2902
2903 static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
2904 struct ieee80211_vif *vif)
2905 {
2906 bool low_latency = iwl_mvm_low_latency(mvm);
2907
2908 return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
2909 NUM_NL80211_BANDS);
2910 }
2911
2912 static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
2913 struct iwl_mvm_scan_params *params,
2914 struct ieee80211_vif *vif)
2915 {
2916 if (iwl_mvm_is_cdb_supported(mvm)) {
2917 params->respect_p2p_go =
2918 iwl_mvm_get_respect_p2p_go_band(mvm, vif,
2919 NL80211_BAND_2GHZ);
2920 params->respect_p2p_go_hb =
2921 iwl_mvm_get_respect_p2p_go_band(mvm, vif,
2922 NL80211_BAND_5GHZ);
2923 } else {
2924 params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
2925 }
2926 }
2927
2928 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2929 struct cfg80211_scan_request *req,
2930 struct ieee80211_scan_ies *ies)
2931 {
2932 struct iwl_host_cmd hcmd = {
2933 .len = { iwl_mvm_scan_size(mvm), },
2934 .data = { mvm->scan_cmd, },
2935 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
2936 };
2937 struct iwl_mvm_scan_params params = {};
2938 int ret, uid;
2939 struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
2940
2941 lockdep_assert_held(&mvm->mutex);
2942
2943 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2944 IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
2945 return -EBUSY;
2946 }
2947
2948 ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
2949 if (ret)
2950 return ret;
2951
2952 /* we should have failed registration if scan_cmd was NULL */
2953 if (WARN_ON(!mvm->scan_cmd))
2954 return -ENOMEM;
2955
2956 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
2957 return -ENOBUFS;
2958
2959 params.n_ssids = req->n_ssids;
2960 params.flags = req->flags;
2961 params.n_channels = req->n_channels;
2962 params.delay = 0;
2963 params.ssids = req->ssids;
2964 params.channels = req->channels;
2965 params.mac_addr = req->mac_addr;
2966 params.mac_addr_mask = req->mac_addr_mask;
2967 params.no_cck = req->no_cck;
2968 params.pass_all = true;
2969 params.n_match_sets = 0;
2970 params.match_sets = NULL;
2971 ether_addr_copy(params.bssid, req->bssid);
2972
2973 params.scan_plans = &scan_plan;
2974 params.n_scan_plans = 1;
2975
2976 params.n_6ghz_params = req->n_6ghz_params;
2977 params.scan_6ghz_params = req->scan_6ghz_params;
2978 params.scan_6ghz = req->scan_6ghz;
2979 iwl_mvm_fill_scan_type(mvm, &params, vif);
2980 iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
2981
2982 if (req->duration)
2983 params.iter_notif = true;
2984
2985 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
2986
2987 iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
2988
2989 uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
2990 IWL_MVM_SCAN_REGULAR);
2991
2992 if (uid < 0)
2993 return uid;
2994
2995 iwl_mvm_pause_tcm(mvm, false);
2996
2997 ret = iwl_mvm_send_cmd(mvm, &hcmd);
2998 if (ret) {
2999 /* If the scan failed, it usually means that the FW was unable
3000 * to allocate the time events. Warn on it, but maybe we
3001 * should try to send the command again with different params.
3002 */
3003 IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
3004 iwl_mvm_resume_tcm(mvm);
3005 mvm->scan_uid_status[uid] = 0;
3006 return ret;
3007 }
3008
3009 IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
3010 mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
3011 mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
3012
3013 if (params.enable_6ghz_passive)
3014 mvm->last_6ghz_passive_scan_jiffies = jiffies;
3015
3016 schedule_delayed_work(&mvm->scan_timeout_dwork,
3017 msecs_to_jiffies(SCAN_TIMEOUT));
3018
3019 return 0;
3020 }
3021
3022 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
3023 struct ieee80211_vif *vif,
3024 struct cfg80211_sched_scan_request *req,
3025 struct ieee80211_scan_ies *ies,
3026 int type)
3027 {
3028 struct iwl_host_cmd hcmd = {
3029 .len = { iwl_mvm_scan_size(mvm), },
3030 .data = { mvm->scan_cmd, },
3031 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
3032 };
3033 struct iwl_mvm_scan_params params = {};
3034 int ret, uid;
3035 int i, j;
3036 bool non_psc_included = false;
3037
3038 lockdep_assert_held(&mvm->mutex);
3039
3040 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
3041 IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
3042 return -EBUSY;
3043 }
3044
3045 ret = iwl_mvm_check_running_scans(mvm, type);
3046 if (ret)
3047 return ret;
3048
3049 /* we should have failed registration if scan_cmd was NULL */
3050 if (WARN_ON(!mvm->scan_cmd))
3051 return -ENOMEM;
3052
3053
3054 params.n_ssids = req->n_ssids;
3055 params.flags = req->flags;
3056 params.n_channels = req->n_channels;
3057 params.ssids = req->ssids;
3058 params.channels = req->channels;
3059 params.mac_addr = req->mac_addr;
3060 params.mac_addr_mask = req->mac_addr_mask;
3061 params.no_cck = false;
3062 params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
3063 params.n_match_sets = req->n_match_sets;
3064 params.match_sets = req->match_sets;
3065 eth_broadcast_addr(params.bssid);
3066 if (!req->n_scan_plans)
3067 return -EINVAL;
3068
3069 params.n_scan_plans = req->n_scan_plans;
3070 params.scan_plans = req->scan_plans;
3071
3072 iwl_mvm_fill_scan_type(mvm, &params, vif);
3073 iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
3074
3075 /* In theory, LMAC scans can handle a 32-bit delay, but since
3076 * waiting for over 18 hours to start the scan is a bit silly
3077 * and to keep it aligned with UMAC scans (which only support
3078 * 16-bit delays), trim it down to 16-bits.
3079 */
3080 if (req->delay > U16_MAX) {
3081 IWL_DEBUG_SCAN(mvm,
3082 "delay value is > 16-bits, set to max possible\n");
3083 params.delay = U16_MAX;
3084 } else {
3085 params.delay = req->delay;
3086 }
3087
3088 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
3089 if (ret)
3090 return ret;
3091
3092 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
3093
3094 /* for 6 GHZ band only PSC channels need to be added */
3095 for (i = 0; i < params.n_channels; i++) {
3096 struct ieee80211_channel *channel = params.channels[i];
3097
3098 if (channel->band == NL80211_BAND_6GHZ &&
3099 !cfg80211_channel_is_psc(channel)) {
3100 non_psc_included = true;
3101 break;
3102 }
3103 }
3104
3105 if (non_psc_included) {
3106 params.channels = kmemdup(params.channels,
3107 sizeof(params.channels[0]) *
3108 params.n_channels,
3109 GFP_KERNEL);
3110 if (!params.channels)
3111 return -ENOMEM;
3112
3113 for (i = j = 0; i < params.n_channels; i++) {
3114 if (params.channels[i]->band == NL80211_BAND_6GHZ &&
3115 !cfg80211_channel_is_psc(params.channels[i]))
3116 continue;
3117 params.channels[j++] = params.channels[i];
3118 }
3119 params.n_channels = j;
3120 }
3121
3122 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
3123 ret = -ENOBUFS;
3124 goto out;
3125 }
3126
3127 uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
3128 if (uid < 0) {
3129 ret = uid;
3130 goto out;
3131 }
3132
3133 ret = iwl_mvm_send_cmd(mvm, &hcmd);
3134 if (!ret) {
3135 IWL_DEBUG_SCAN(mvm,
3136 "Sched scan request was sent successfully\n");
3137 mvm->scan_status |= type;
3138 } else {
3139 /* If the scan failed, it usually means that the FW was unable
3140 * to allocate the time events. Warn on it, but maybe we
3141 * should try to send the command again with different params.
3142 */
3143 IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
3144 mvm->scan_uid_status[uid] = 0;
3145 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3146 }
3147
3148 out:
3149 if (non_psc_included)
3150 kfree(params.channels);
3151 return ret;
3152 }
3153
3154 void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
3155 struct iwl_rx_cmd_buffer *rxb)
3156 {
3157 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3158 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
3159 u32 uid = __le32_to_cpu(notif->uid);
3160 bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
3161
3162 mvm->mei_scan_filter.is_mei_limited_scan = false;
3163
3164 if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
3165 return;
3166
3167 /* if the scan is already stopping, we don't need to notify mac80211 */
3168 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
3169 struct cfg80211_scan_info info = {
3170 .aborted = aborted,
3171 .scan_start_tsf = mvm->scan_start,
3172 };
3173
3174 memcpy(info.tsf_bssid, mvm->scan_vif->deflink.bssid, ETH_ALEN);
3175 ieee80211_scan_completed(mvm->hw, &info);
3176 mvm->scan_vif = NULL;
3177 cancel_delayed_work(&mvm->scan_timeout_dwork);
3178 iwl_mvm_resume_tcm(mvm);
3179 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
3180 ieee80211_sched_scan_stopped(mvm->hw);
3181 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3182 }
3183
3184 mvm->scan_status &= ~mvm->scan_uid_status[uid];
3185 IWL_DEBUG_SCAN(mvm,
3186 "Scan completed, uid %u type %u, status %s, EBS status %s\n",
3187 uid, mvm->scan_uid_status[uid],
3188 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
3189 "completed" : "aborted",
3190 iwl_mvm_ebs_status_str(notif->ebs_status));
3191 IWL_DEBUG_SCAN(mvm,
3192 "Last line %d, Last iteration %d, Time from last iteration %d\n",
3193 notif->last_schedule, notif->last_iter,
3194 __le32_to_cpu(notif->time_from_last_iter));
3195
3196 if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
3197 notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
3198 mvm->last_ebs_successful = false;
3199
3200 mvm->scan_uid_status[uid] = 0;
3201 }
3202
3203 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
3204 struct iwl_rx_cmd_buffer *rxb)
3205 {
3206 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3207 struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
3208
3209 mvm->scan_start = le64_to_cpu(notif->start_tsf);
3210
3211 IWL_DEBUG_SCAN(mvm,
3212 "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
3213 notif->status, notif->scanned_channels);
3214
3215 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
3216 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
3217 ieee80211_sched_scan_results(mvm->hw);
3218 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
3219 }
3220
3221 IWL_DEBUG_SCAN(mvm,
3222 "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
3223 mvm->scan_start);
3224 }
3225
3226 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type, bool *wait)
3227 {
3228 struct iwl_umac_scan_abort abort_cmd = {};
3229 struct iwl_host_cmd cmd = {
3230 .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
3231 .len = { sizeof(abort_cmd), },
3232 .data = { &abort_cmd, },
3233 .flags = CMD_SEND_IN_RFKILL,
3234 };
3235
3236 int uid, ret;
3237 u32 status = IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND;
3238
3239 lockdep_assert_held(&mvm->mutex);
3240
3241 *wait = true;
3242
3243 /* We should always get a valid index here, because we already
3244 * checked that this type of scan was running in the generic
3245 * code.
3246 */
3247 uid = iwl_mvm_scan_uid_by_status(mvm, type);
3248 if (WARN_ON_ONCE(uid < 0))
3249 return uid;
3250
3251 abort_cmd.uid = cpu_to_le32(uid);
3252
3253 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
3254
3255 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
3256
3257 IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d, status=%u\n", ret, status);
3258 if (!ret)
3259 mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
3260
3261 /* Handle the case that the FW is no longer familiar with the scan that
3262 * is to be stopped. In such a case, it is expected that the scan
3263 * complete notification was already received but not yet processed.
3264 * In such a case, there is no need to wait for a scan complete
3265 * notification and the flow should continue similar to the case that
3266 * the scan was really aborted.
3267 */
3268 if (status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND) {
3269 mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
3270 *wait = false;
3271 }
3272
3273 return ret;
3274 }
3275
3276 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
3277 {
3278 struct iwl_notification_wait wait_scan_done;
3279 static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
3280 SCAN_OFFLOAD_COMPLETE, };
3281 int ret;
3282 bool wait = true;
3283
3284 lockdep_assert_held(&mvm->mutex);
3285
3286 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
3287 scan_done_notif,
3288 ARRAY_SIZE(scan_done_notif),
3289 NULL, NULL);
3290
3291 IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
3292
3293 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
3294 ret = iwl_mvm_umac_scan_abort(mvm, type, &wait);
3295 else
3296 ret = iwl_mvm_lmac_scan_abort(mvm);
3297
3298 if (ret) {
3299 IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
3300 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
3301 return ret;
3302 } else if (!wait) {
3303 IWL_DEBUG_SCAN(mvm, "no need to wait for scan type %d\n", type);
3304 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
3305 return 0;
3306 }
3307
3308 return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
3309 1 * HZ);
3310 }
3311
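/* Size of a versioned UMAC scan request, or 0 for versions handled by
 * the capability-based fallback in iwl_mvm_scan_size().
 */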
3312 static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
3313 {
3314 switch (scan_ver) {
3315 case 12:
3316 return sizeof(struct iwl_scan_req_umac_v12);
3317 case 14:
3318 case 15:
3319 case 16:
3320 case 17:
3321 return sizeof(struct iwl_scan_req_umac_v17);
3322 }
3323
3324 return 0;
3325 }
3326
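/*
 * Size of the scan request command, depending on which scan API version
 * (versioned UMAC, legacy UMAC or LMAC) the FW supports.
 */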
3327 size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
3328 {
3329 int base_size, tail_size;
3330 u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
3331 IWL_FW_CMD_VER_UNKNOWN);
3332
3333 base_size = iwl_scan_req_umac_get_size(scan_ver);
3334 if (base_size)
3335 return base_size;
3336
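	/* Fall back to the older capability-based UMAC sizes, or to LMAC */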
3338 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
3339 base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
3340 else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
3341 base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
3342 else if (iwl_mvm_cdb_scan_api(mvm))
3343 base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
3344 else
3345 base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
3346
3347 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
3348 if (iwl_mvm_is_scan_ext_chan_supported(mvm))
3349 tail_size = sizeof(struct iwl_scan_req_umac_tail_v2);
3350 else
3351 tail_size = sizeof(struct iwl_scan_req_umac_tail_v1);
3352
3353 return base_size +
3354 sizeof(struct iwl_scan_channel_cfg_umac) *
3355 mvm->fw->ucode_capa.n_scan_channels +
3356 tail_size;
3357 }
3358 return sizeof(struct iwl_scan_req_lmac) +
3359 sizeof(struct iwl_scan_channel_cfg_lmac) *
3360 mvm->fw->ucode_capa.n_scan_channels +
3361 sizeof(struct iwl_scan_probe_req_v1);
3362 }
3363
3364 /*
3365 * This function is used in the nic restart flow to inform mac80211 about
3366 * scans that were aborted by the restart flow or by an assert.
3367 */
3368 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
3369 {
3370 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
3371 int uid, i;
3372
3373 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
3374 if (uid >= 0) {
3375 struct cfg80211_scan_info info = {
3376 .aborted = true,
3377 };
3378
3379 cancel_delayed_work(&mvm->scan_timeout_dwork);
3380
3381 ieee80211_scan_completed(mvm->hw, &info);
3382 mvm->scan_uid_status[uid] = 0;
3383 }
3384 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
3385 if (uid >= 0) {
3386 /* Sched scan will be restarted by mac80211 in
3387 * restart_hw, so do not report if FW is about to be
3388 * restarted.
3389 */
3390 if (!mvm->fw_restart)
3391 ieee80211_sched_scan_stopped(mvm->hw);
3392 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3393 mvm->scan_uid_status[uid] = 0;
3394 }
3395 uid = iwl_mvm_scan_uid_by_status(mvm,
3396 IWL_MVM_SCAN_STOPPING_REGULAR);
3397 if (uid >= 0)
3398 mvm->scan_uid_status[uid] = 0;
3399
3400 uid = iwl_mvm_scan_uid_by_status(mvm,
3401 IWL_MVM_SCAN_STOPPING_SCHED);
3402 if (uid >= 0)
3403 mvm->scan_uid_status[uid] = 0;
3404
3405 /* We shouldn't have any UIDs still set. Loop over all the
3406 * UIDs to make sure there's nothing left there and warn if
3407 * any is found.
3408 */
3409 for (i = 0; i < mvm->max_scans; i++) {
3410 if (WARN_ONCE(mvm->scan_uid_status[i],
3411 "UMAC scan UID %d status was not cleaned\n",
3412 i))
3413 mvm->scan_uid_status[i] = 0;
3414 }
3415 } else {
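		/* LMAC scan: state is tracked only via mvm->scan_status */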
3416 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
3417 struct cfg80211_scan_info info = {
3418 .aborted = true,
3419 };
3420
3421 cancel_delayed_work(&mvm->scan_timeout_dwork);
3422 ieee80211_scan_completed(mvm->hw, &info);
3423 }
3424
3425 /* Sched scan will be restarted by mac80211 in
3426 * restart_hw, so do not report if FW is about to be
3427 * restarted.
3428 */
3429 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
3430 !mvm->fw_restart) {
3431 ieee80211_sched_scan_stopped(mvm->hw);
3432 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3433 }
3434 }
3435 }
3436
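/*
 * Stop a scan of the given type and, if requested, notify mac80211 that
 * it was aborted. Does nothing if no scan of this type is running.
 */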
3437 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
3438 {
3439 int ret;
3440
3441 if (!(mvm->scan_status & type))
3442 return 0;
3443
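	/* Device is not started: there is no FW scan to abort, only SW
	 * state to clean up.
	 */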
3444 if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
3445 ret = 0;
3446 goto out;
3447 }
3448
3449 ret = iwl_mvm_scan_stop_wait(mvm, type);
3450 if (!ret)
3451 mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
3452 out:
3453 /* Clear the scan status so the next scan requests will
3454 * succeed, and leave the scan marked as stopping so that the
3455 * Rx handler doesn't do anything with the completion
3456 * notification, as the scan was stopped from this path.
3457 */
3458 mvm->scan_status &= ~type;
3459
3460 if (type == IWL_MVM_SCAN_REGULAR) {
3461 cancel_delayed_work(&mvm->scan_timeout_dwork);
3462 if (notify) {
3463 struct cfg80211_scan_info info = {
3464 .aborted = true,
3465 };
3466
3467 ieee80211_scan_completed(mvm->hw, &info);
3468 }
3469 } else if (notify) {
3470 ieee80211_sched_scan_stopped(mvm->hw);
3471 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3472 }
3473
3474 return ret;
3475 }
3476