1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2015-2017 Intel Deutschland GmbH
4 * Copyright (C) 2018-2022 Intel Corporation
5 */
6 #include <linux/etherdevice.h>
7 #include <linux/math64.h>
8 #include <net/cfg80211.h>
9 #include "mvm.h"
10 #include "iwl-io.h"
11 #include "iwl-prph.h"
12 #include "constants.h"
13
14 struct iwl_mvm_loc_entry {
15 struct list_head list;
16 u8 addr[ETH_ALEN];
17 u8 lci_len, civic_len;
18 u8 buf[];
19 };
20
21 struct iwl_mvm_smooth_entry {
22 struct list_head list;
23 u8 addr[ETH_ALEN];
24 s64 rtt_avg;
25 u64 host_time;
26 };
27
28 enum iwl_mvm_pasn_flags {
29 IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
30 };
31
32 struct iwl_mvm_ftm_pasn_entry {
33 struct list_head list;
34 u8 addr[ETH_ALEN];
35 u8 hltk[HLTK_11AZ_LEN];
36 u8 tk[TK_11AZ_LEN];
37 u8 cipher;
38 u8 tx_pn[IEEE80211_CCMP_PN_LEN];
39 u8 rx_pn[IEEE80211_CCMP_PN_LEN];
40 u32 flags;
41 };
42
43 int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
44 u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
45 u8 *hltk, u32 hltk_len)
46 {
47 struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
48 GFP_KERNEL);
49 u32 expected_tk_len;
50
51 lockdep_assert_held(&mvm->mutex);
52
53 if (!pasn)
54 return -ENOBUFS;
55
56 iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
57
58 pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
59
60 switch (pasn->cipher) {
61 case IWL_LOCATION_CIPHER_CCMP_128:
62 case IWL_LOCATION_CIPHER_GCMP_128:
63 expected_tk_len = WLAN_KEY_LEN_CCMP;
64 break;
65 case IWL_LOCATION_CIPHER_GCMP_256:
66 expected_tk_len = WLAN_KEY_LEN_GCMP_256;
67 break;
68 default:
69 goto out;
70 }
71
72 /*
73 * If we are associated to this AP and already have a security context,
74 * the TK is already configured for this station, so it
75 * shouldn't be set again here.
76 */
77 if (vif->cfg.assoc) {
78 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
79 struct ieee80211_bss_conf *link_conf;
80 unsigned int link_id;
81 struct ieee80211_sta *sta;
82 u8 sta_id;
83
84 rcu_read_lock();
85 for_each_vif_active_link(vif, link_conf, link_id) {
86 if (memcmp(addr, link_conf->bssid, ETH_ALEN))
87 continue;
88
89 sta_id = mvmvif->link[link_id]->ap_sta_id;
90 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
91 if (!IS_ERR_OR_NULL(sta) && sta->mfp)
92 expected_tk_len = 0;
93 break;
94 }
95 rcu_read_unlock();
96 }
97
98 if (tk_len != expected_tk_len ||
99 (hltk_len && hltk_len != sizeof(pasn->hltk))) {
100 IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
101 tk_len, hltk_len);
102 goto out;
103 }
104
105 if (!expected_tk_len && !hltk_len) {
106 IWL_ERR(mvm, "TK and HLTK not set\n");
107 goto out;
108 }
109
110 memcpy(pasn->addr, addr, sizeof(pasn->addr));
111
112 if (hltk_len) {
113 memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
114 pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
115 }
116
117 if (tk && tk_len)
118 memcpy(pasn->tk, tk, sizeof(pasn->tk));
119
120 list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
121 return 0;
122 out:
123 kfree(pasn);
124 return -EINVAL;
125 }
126
127 void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
128 {
129 struct iwl_mvm_ftm_pasn_entry *entry, *prev;
130
131 lockdep_assert_held(&mvm->mutex);
132
133 list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
134 list) {
135 if (memcmp(entry->addr, addr, sizeof(entry->addr)))
136 continue;
137
138 list_del(&entry->list);
139 kfree(entry);
140 return;
141 }
142 }
143
144 static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
145 {
146 struct iwl_mvm_loc_entry *e, *t;
147
148 mvm->ftm_initiator.req = NULL;
149 mvm->ftm_initiator.req_wdev = NULL;
150 memset(mvm->ftm_initiator.responses, 0,
151 sizeof(mvm->ftm_initiator.responses));
152
153 list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
154 list_del(&e->list);
155 kfree(e);
156 }
157 }
158
159 void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
160 {
161 struct cfg80211_pmsr_result result = {
162 .status = NL80211_PMSR_STATUS_FAILURE,
163 .final = 1,
164 .host_time = ktime_get_boottime_ns(),
165 .type = NL80211_PMSR_TYPE_FTM,
166 };
167 int i;
168
169 lockdep_assert_held(&mvm->mutex);
170
171 if (!mvm->ftm_initiator.req)
172 return;
173
174 for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
175 memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
176 ETH_ALEN);
177 result.ftm.burst_index = mvm->ftm_initiator.responses[i];
178
179 cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
180 mvm->ftm_initiator.req,
181 &result, GFP_KERNEL);
182 }
183
184 cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
185 mvm->ftm_initiator.req, GFP_KERNEL);
186 iwl_mvm_ftm_reset(mvm);
187 }
188
189 void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
190 {
191 INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
192
193 IWL_DEBUG_INFO(mvm,
194 "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
195 IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
196 IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
197 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
198 IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
199 IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
200 }
201
202 void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
203 {
204 struct iwl_mvm_smooth_entry *se, *st;
205
206 list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
207 list) {
208 list_del(&se->list);
209 kfree(se);
210 }
211 }
212
213 static int
214 iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
215 {
216 switch (s) {
217 case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
218 return 0;
219 case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
220 return -EBUSY;
221 default:
222 WARN_ON_ONCE(1);
223 return -EIO;
224 }
225 }
226
227 static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
228 struct iwl_tof_range_req_cmd_v5 *cmd,
229 struct cfg80211_pmsr_request *req)
230 {
231 int i;
232
233 cmd->request_id = req->cookie;
234 cmd->num_of_ap = req->n_peers;
235
236 /* Use the maximum for "no timeout" or for a timeout larger than we can handle */
237 if (!req->timeout || req->timeout > 255 * 100)
238 cmd->req_timeout = 255;
239 else
240 cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
241
242 /*
243 * We always treat the address as random, since otherwise we would
244 * have filled in our local address there instead.
245 */
246 cmd->macaddr_random = 1;
247 memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
248 for (i = 0; i < ETH_ALEN; i++)
249 cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
250
251 if (vif->cfg.assoc)
252 memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
253 else
254 eth_broadcast_addr(cmd->range_req_bssid);
255 }
256
257 static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
258 struct ieee80211_vif *vif,
259 struct iwl_tof_range_req_cmd_v9 *cmd,
260 struct cfg80211_pmsr_request *req)
261 {
262 int i;
263
264 cmd->initiator_flags =
265 cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
266 IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
267 cmd->request_id = req->cookie;
268 cmd->num_of_ap = req->n_peers;
269
270 /*
271 * Use a large value for "no timeout". Don't use the maximum value
272 * because of fw limitations.
273 */
274 if (req->timeout)
275 cmd->req_timeout_ms = cpu_to_le32(req->timeout);
276 else
277 cmd->req_timeout_ms = cpu_to_le32(0xfffff);
278
279 memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
280 for (i = 0; i < ETH_ALEN; i++)
281 cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
282
283 if (vif->cfg.assoc) {
284 memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
285
286 /* AP's TSF is only relevant if associated */
287 for (i = 0; i < req->n_peers; i++) {
288 if (req->peers[i].report_ap_tsf) {
289 struct iwl_mvm_vif *mvmvif =
290 iwl_mvm_vif_from_mac80211(vif);
291
292 cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
293 return;
294 }
295 }
296 } else {
297 eth_broadcast_addr(cmd->range_req_bssid);
298 }
299
300 /* Don't report AP's TSF */
301 cmd->tsf_mac_id = cpu_to_le32(0xff);
302 }
303
304 static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
305 struct iwl_tof_range_req_cmd_v8 *cmd,
306 struct cfg80211_pmsr_request *req)
307 {
308 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
309 }
310
311 static int
312 iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
313 struct cfg80211_pmsr_request_peer *peer,
314 u8 *channel, u8 *bandwidth,
315 u8 *ctrl_ch_position)
316 {
317 u32 freq = peer->chandef.chan->center_freq;
318
319 *channel = ieee80211_frequency_to_channel(freq);
320
321 switch (peer->chandef.width) {
322 case NL80211_CHAN_WIDTH_20_NOHT:
323 *bandwidth = IWL_TOF_BW_20_LEGACY;
324 break;
325 case NL80211_CHAN_WIDTH_20:
326 *bandwidth = IWL_TOF_BW_20_HT;
327 break;
328 case NL80211_CHAN_WIDTH_40:
329 *bandwidth = IWL_TOF_BW_40;
330 break;
331 case NL80211_CHAN_WIDTH_80:
332 *bandwidth = IWL_TOF_BW_80;
333 break;
334 default:
335 IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
336 peer->chandef.width);
337 return -EINVAL;
338 }
339
340 *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
341 iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
342
343 return 0;
344 }
345
346 static int
347 iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
348 struct cfg80211_pmsr_request_peer *peer,
349 u8 *channel, u8 *format_bw,
350 u8 *ctrl_ch_position)
351 {
352 u32 freq = peer->chandef.chan->center_freq;
353 u8 cmd_ver;
354
355 *channel = ieee80211_frequency_to_channel(freq);
356
357 switch (peer->chandef.width) {
358 case NL80211_CHAN_WIDTH_20_NOHT:
359 *format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
360 *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
361 break;
362 case NL80211_CHAN_WIDTH_20:
363 *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
364 *format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
365 break;
366 case NL80211_CHAN_WIDTH_40:
367 *format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
368 *format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
369 break;
370 case NL80211_CHAN_WIDTH_80:
371 *format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
372 *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
373 break;
374 case NL80211_CHAN_WIDTH_160:
375 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
376 WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
377 IWL_FW_CMD_VER_UNKNOWN);
378
379 if (cmd_ver >= 13) {
380 *format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
381 *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
382 break;
383 }
384 fallthrough;
385 default:
386 IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
387 peer->chandef.width);
388 return -EINVAL;
389 }
390
391 /* Non-EDCA-based measurements must use the HE preamble */
392 if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
393 *format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
394
395 *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
396 iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
397
398 return 0;
399 }
400
401 static int
402 iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
403 struct cfg80211_pmsr_request_peer *peer,
404 struct iwl_tof_range_req_ap_entry_v2 *target)
405 {
406 int ret;
407
408 ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
409 &target->bandwidth,
410 &target->ctrl_ch_position);
411 if (ret)
412 return ret;
413
414 memcpy(target->bssid, peer->addr, ETH_ALEN);
415 target->burst_period =
416 cpu_to_le16(peer->ftm.burst_period);
417 target->samples_per_burst = peer->ftm.ftms_per_burst;
418 target->num_of_bursts = peer->ftm.num_bursts_exp;
419 target->measure_type = 0; /* regular two-sided FTM */
420 target->retries_per_sample = peer->ftm.ftmr_retries;
421 target->asap_mode = peer->ftm.asap;
422 target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
423
424 if (peer->ftm.request_lci)
425 target->location_req |= IWL_TOF_LOC_LCI;
426 if (peer->ftm.request_civicloc)
427 target->location_req |= IWL_TOF_LOC_CIVIC;
428
429 target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
430
431 return 0;
432 }
433
434 #define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
435 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
436
437 static void
438 iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
439 struct cfg80211_pmsr_request_peer *peer,
440 struct iwl_tof_range_req_ap_entry_v6 *target)
441 {
442 memcpy(target->bssid, peer->addr, ETH_ALEN);
443 target->burst_period =
444 cpu_to_le16(peer->ftm.burst_period);
445 target->samples_per_burst = peer->ftm.ftms_per_burst;
446 target->num_of_bursts = peer->ftm.num_bursts_exp;
447 target->ftmr_max_retries = peer->ftm.ftmr_retries;
448 target->initiator_ap_flags = cpu_to_le32(0);
449
450 if (peer->ftm.asap)
451 FTM_PUT_FLAG(ASAP);
452
453 if (peer->ftm.request_lci)
454 FTM_PUT_FLAG(LCI_REQUEST);
455
456 if (peer->ftm.request_civicloc)
457 FTM_PUT_FLAG(CIVIC_REQUEST);
458
459 if (IWL_MVM_FTM_INITIATOR_DYNACK)
460 FTM_PUT_FLAG(DYN_ACK);
461
462 if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
463 FTM_PUT_FLAG(ALGO_LR);
464 else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
465 FTM_PUT_FLAG(ALGO_FFT);
466
467 if (peer->ftm.trigger_based)
468 FTM_PUT_FLAG(TB);
469 else if (peer->ftm.non_trigger_based)
470 FTM_PUT_FLAG(NON_TB);
471
472 if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
473 peer->ftm.lmr_feedback)
474 FTM_PUT_FLAG(LMR_FEEDBACK);
475 }
476
477 static int
478 iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
479 struct cfg80211_pmsr_request_peer *peer,
480 struct iwl_tof_range_req_ap_entry_v3 *target)
481 {
482 int ret;
483
484 ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
485 &target->bandwidth,
486 &target->ctrl_ch_position);
487 if (ret)
488 return ret;
489
490 /*
491 * Versions 3 and 4 share some common fields, so
492 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
493 */
494 iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
495
496 return 0;
497 }
498
499 static int
500 iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
501 struct cfg80211_pmsr_request_peer *peer,
502 struct iwl_tof_range_req_ap_entry_v4 *target)
503 {
504 int ret;
505
506 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
507 &target->format_bw,
508 &target->ctrl_ch_position);
509 if (ret)
510 return ret;
511
512 iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
513
514 return 0;
515 }
516
517 static int
518 iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
519 struct cfg80211_pmsr_request_peer *peer,
520 struct iwl_tof_range_req_ap_entry_v6 *target)
521 {
522 int ret;
523
524 ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
525 &target->format_bw,
526 &target->ctrl_ch_position);
527 if (ret)
528 return ret;
529
530 iwl_mvm_ftm_put_target_common(mvm, peer, target);
531
532 if (vif->cfg.assoc) {
533 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
534 struct ieee80211_sta *sta;
535 struct ieee80211_bss_conf *link_conf;
536 unsigned int link_id;
537
538 rcu_read_lock();
539 for_each_vif_active_link(vif, link_conf, link_id) {
540 if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
541 continue;
542
543 target->sta_id = mvmvif->link[link_id]->ap_sta_id;
544 sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
545 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
546 rcu_read_unlock();
547 return PTR_ERR_OR_ZERO(sta);
548 }
549
550 if (sta->mfp && (peer->ftm.trigger_based ||
551 peer->ftm.non_trigger_based))
552 FTM_PUT_FLAG(PMF);
553 break;
554 }
555 rcu_read_unlock();
556 } else {
557 target->sta_id = IWL_MVM_INVALID_STA;
558 }
559
560 /*
561 * TODO: Beacon interval is currently unknown, so use the common value
562 * of 100 TUs.
563 */
564 target->beacon_interval = cpu_to_le16(100);
565 return 0;
566 }
567
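/*
 * Send the range request command and translate a non-zero firmware status
 * into an errno.
 */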
568 static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
569 {
570 u32 status;
571 int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
572
573 if (!err && status) {
574 IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
575 status);
576 err = iwl_ftm_range_request_status_to_err(status);
577 }
578
579 return err;
580 }
581
582 static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
583 struct cfg80211_pmsr_request *req)
584 {
585 struct iwl_tof_range_req_cmd_v5 cmd_v5;
586 struct iwl_host_cmd hcmd = {
587 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
588 .dataflags[0] = IWL_HCMD_DFL_DUP,
589 .data[0] = &cmd_v5,
590 .len[0] = sizeof(cmd_v5),
591 };
592 u8 i;
593 int err;
594
595 iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
596
597 for (i = 0; i < cmd_v5.num_of_ap; i++) {
598 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
599
600 err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
601 if (err)
602 return err;
603 }
604
605 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
606 }
607
608 static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
609 struct cfg80211_pmsr_request *req)
610 {
611 struct iwl_tof_range_req_cmd_v7 cmd_v7;
612 struct iwl_host_cmd hcmd = {
613 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
614 .dataflags[0] = IWL_HCMD_DFL_DUP,
615 .data[0] = &cmd_v7,
616 .len[0] = sizeof(cmd_v7),
617 };
618 u8 i;
619 int err;
620
621 /*
622 * Versions 7 and 8 have the same structure, except for the responders
623 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
624 */
625 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
626
627 for (i = 0; i < cmd_v7.num_of_ap; i++) {
628 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
629
630 err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
631 if (err)
632 return err;
633 }
634
635 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
636 }
637
638 static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
639 struct cfg80211_pmsr_request *req)
640 {
641 struct iwl_tof_range_req_cmd_v8 cmd;
642 struct iwl_host_cmd hcmd = {
643 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
644 .dataflags[0] = IWL_HCMD_DFL_DUP,
645 .data[0] = &cmd,
646 .len[0] = sizeof(cmd),
647 };
648 u8 i;
649 int err;
650
651 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
652
653 for (i = 0; i < cmd.num_of_ap; i++) {
654 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
655
656 err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
657 if (err)
658 return err;
659 }
660
661 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
662 }
663
664 static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
665 struct cfg80211_pmsr_request *req)
666 {
667 struct iwl_tof_range_req_cmd_v9 cmd;
668 struct iwl_host_cmd hcmd = {
669 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
670 .dataflags[0] = IWL_HCMD_DFL_DUP,
671 .data[0] = &cmd,
672 .len[0] = sizeof(cmd),
673 };
674 u8 i;
675 int err;
676
677 iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
678
679 for (i = 0; i < cmd.num_of_ap; i++) {
680 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
681 struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
682
683 err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
684 if (err)
685 return err;
686 }
687
688 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
689 }
690
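/*
 * Key iterator: if the key belongs to the station matching the target BSSID,
 * copy its TK and cipher into the range request target entry.
 */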
691 static void iter(struct ieee80211_hw *hw,
692 struct ieee80211_vif *vif,
693 struct ieee80211_sta *sta,
694 struct ieee80211_key_conf *key,
695 void *data)
696 {
697 struct iwl_tof_range_req_ap_entry_v6 *target = data;
698
699 if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
700 return;
701
702 WARN_ON(!sta->mfp);
703
704 if (WARN_ON(key->keylen > sizeof(target->tk)))
705 return;
706
707 memcpy(target->tk, key->key, key->keylen);
708 target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
709 WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
710 }
711
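/*
 * For TB/NTB targets, fill in the secured ranging material (cipher, HLTK,
 * TK and PN values) from the stored PASN entry for this BSSID, if any.
 */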
712 static void
713 iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
714 struct iwl_tof_range_req_ap_entry_v7 *target)
715 {
716 struct iwl_mvm_ftm_pasn_entry *entry;
717 u32 flags = le32_to_cpu(target->initiator_ap_flags);
718
719 if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
720 IWL_INITIATOR_AP_FLAGS_TB)))
721 return;
722
723 lockdep_assert_held(&mvm->mutex);
724
725 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
726 if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
727 continue;
728
729 target->cipher = entry->cipher;
730
731 if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
732 memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
733 else
734 memset(target->hltk, 0, sizeof(target->hltk));
735
736 if (vif->cfg.assoc &&
737 !memcmp(vif->bss_conf.bssid, target->bssid,
738 sizeof(target->bssid)))
739 ieee80211_iter_keys(mvm->hw, vif, iter, target);
740 else
741 memcpy(target->tk, entry->tk, sizeof(target->tk));
742
743 memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
744 memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
745
746 target->initiator_ap_flags |=
747 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
748 return;
749 }
750 }
751
752 static int
753 iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
754 struct cfg80211_pmsr_request_peer *peer,
755 struct iwl_tof_range_req_ap_entry_v7 *target)
756 {
757 int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
758 if (err)
759 return err;
760
761 iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
762 return err;
763 }
764
765 static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
766 struct ieee80211_vif *vif,
767 struct cfg80211_pmsr_request *req)
768 {
769 struct iwl_tof_range_req_cmd_v11 cmd;
770 struct iwl_host_cmd hcmd = {
771 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
772 .dataflags[0] = IWL_HCMD_DFL_DUP,
773 .data[0] = &cmd,
774 .len[0] = sizeof(cmd),
775 };
776 u8 i;
777 int err;
778
779 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
780
781 for (i = 0; i < cmd.num_of_ap; i++) {
782 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
783 struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
784
785 err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
786 if (err)
787 return err;
788 }
789
790 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
791 }
792
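/*
 * Set the NDP ranging parameters (max repetitions, space-time streams and
 * total LTFs) for both the R2I and I2R directions.
 */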
793 static void
794 iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
795 struct iwl_tof_range_req_ap_entry_v8 *target)
796 {
797 /* Only 2 STS are supported on Tx */
798 u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
799 IWL_MVM_FTM_I2R_MAX_STS;
800
801 target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
802 (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
803 target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
804 (i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
805 target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
806 target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
807 }
808
809 static int
810 iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
811 struct cfg80211_pmsr_request_peer *peer,
812 struct iwl_tof_range_req_ap_entry_v8 *target)
813 {
814 u32 flags;
815 int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
816
817 if (ret)
818 return ret;
819
820 iwl_mvm_ftm_set_ndp_params(mvm, target);
821
822 /*
823 * If secure LTF is disabled, drop the SECURED flag and fall back to PMF only
824 */
825 flags = le32_to_cpu(target->initiator_ap_flags);
826 if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
827 !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
828 flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
829 flags |= IWL_INITIATOR_AP_FLAGS_PMF;
830 target->initiator_ap_flags = cpu_to_le32(flags);
831 }
832
833 return 0;
834 }
835
836 static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
837 struct ieee80211_vif *vif,
838 struct cfg80211_pmsr_request *req)
839 {
840 struct iwl_tof_range_req_cmd_v12 cmd;
841 struct iwl_host_cmd hcmd = {
842 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
843 .dataflags[0] = IWL_HCMD_DFL_DUP,
844 .data[0] = &cmd,
845 .len[0] = sizeof(cmd),
846 };
847 u8 i;
848 int err;
849
850 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
851
852 for (i = 0; i < cmd.num_of_ap; i++) {
853 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
854 struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
855
856 err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
857 if (err)
858 return err;
859 }
860
861 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
862 }
863
864 static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
865 struct ieee80211_vif *vif,
866 struct cfg80211_pmsr_request *req)
867 {
868 struct iwl_tof_range_req_cmd_v13 cmd;
869 struct iwl_host_cmd hcmd = {
870 .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
871 .dataflags[0] = IWL_HCMD_DFL_DUP,
872 .data[0] = &cmd,
873 .len[0] = sizeof(cmd),
874 };
875 u8 i;
876 int err;
877
878 iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
879
880 for (i = 0; i < cmd.num_of_ap; i++) {
881 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
882 struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
883
884 err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
885 if (err)
886 return err;
887
888 if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
889 target->bss_color = peer->ftm.bss_color;
890
891 if (peer->ftm.non_trigger_based) {
892 target->min_time_between_msr =
893 cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
894 target->burst_period =
895 cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
896 } else {
897 target->min_time_between_msr = cpu_to_le16(0);
898 }
899
900 target->band =
901 iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
902 }
903
904 return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
905 }
906
907 int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
908 struct cfg80211_pmsr_request *req)
909 {
910 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
911 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
912 int err;
913
914 lockdep_assert_held(&mvm->mutex);
915
916 if (mvm->ftm_initiator.req)
917 return -EBUSY;
918
919 if (new_api) {
920 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
921 WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
922 IWL_FW_CMD_VER_UNKNOWN);
923
924 switch (cmd_ver) {
925 case 13:
926 err = iwl_mvm_ftm_start_v13(mvm, vif, req);
927 break;
928 case 12:
929 err = iwl_mvm_ftm_start_v12(mvm, vif, req);
930 break;
931 case 11:
932 err = iwl_mvm_ftm_start_v11(mvm, vif, req);
933 break;
934 case 9:
935 case 10:
936 err = iwl_mvm_ftm_start_v9(mvm, vif, req);
937 break;
938 case 8:
939 err = iwl_mvm_ftm_start_v8(mvm, vif, req);
940 break;
941 default:
942 err = iwl_mvm_ftm_start_v7(mvm, vif, req);
943 break;
944 }
945 } else {
946 err = iwl_mvm_ftm_start_v5(mvm, vif, req);
947 }
948
949 if (!err) {
950 mvm->ftm_initiator.req = req;
951 mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
952 }
953
954 return err;
955 }
956
957 void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
958 {
959 struct iwl_tof_range_abort_cmd cmd = {
960 .request_id = req->cookie,
961 };
962
963 lockdep_assert_held(&mvm->mutex);
964
965 if (req != mvm->ftm_initiator.req)
966 return;
967
968 iwl_mvm_ftm_reset(mvm);
969
970 if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
971 0, sizeof(cmd), &cmd))
972 IWL_ERR(mvm, "failed to abort FTM process\n");
973 }
974
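/*
 * Find the index of the peer with the given address in the request,
 * or return -ENOENT if it is not part of the request.
 */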
975 static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
976 const u8 *addr)
977 {
978 int i;
979
980 for (i = 0; i < req->n_peers; i++) {
981 struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
982
983 if (ether_addr_equal_unaligned(peer->addr, addr))
984 return i;
985 }
986
987 return -ENOENT;
988 }
989
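/*
 * Convert a GP2 timestamp reported by the firmware into host time
 * (nanoseconds since boot), based on the current GP2/boottime pair.
 */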
990 static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
991 {
992 u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
993 u32 curr_gp2, diff;
994 u64 now_from_boot_ns;
995
996 iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
997 &now_from_boot_ns, NULL);
998
999 if (curr_gp2 >= gp2_ts)
1000 diff = curr_gp2 - gp2_ts;
1001 else
1002 diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
1003
1004 return now_from_boot_ns - (u64)diff * 1000;
1005 }
1006
1007 static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
1008 struct cfg80211_pmsr_result *res)
1009 {
1010 struct iwl_mvm_loc_entry *entry;
1011
1012 list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
1013 if (!ether_addr_equal_unaligned(res->addr, entry->addr))
1014 continue;
1015
1016 if (entry->lci_len) {
1017 res->ftm.lci_len = entry->lci_len;
1018 res->ftm.lci = entry->buf;
1019 }
1020
1021 if (entry->civic_len) {
1022 res->ftm.civicloc_len = entry->civic_len;
1023 res->ftm.civicloc = entry->buf + entry->lci_len;
1024 }
1025
1026 /* we found the entry we needed */
1027 break;
1028 }
1029 }
1030
1031 static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
1032 u8 num_of_aps)
1033 {
1034 lockdep_assert_held(&mvm->mutex);
1035
1036 if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
1037 IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
1038 request_id, (u8)mvm->ftm_initiator.req->cookie);
1039 return -EINVAL;
1040 }
1041
1042 if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
1043 IWL_ERR(mvm, "FTM range response invalid\n");
1044 return -EINVAL;
1045 }
1046
1047 return 0;
1048 }
1049
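/*
 * Maintain a per-responder exponential moving average of the RTT and only
 * override the reported value when it deviates from that average by more
 * than the configured undershoot/overshoot thresholds.
 */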
1050 static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
1051 struct cfg80211_pmsr_result *res)
1052 {
1053 struct iwl_mvm_smooth_entry *resp = NULL, *iter;
1054 s64 rtt_avg, rtt = res->ftm.rtt_avg;
1055 u32 undershoot, overshoot;
1056 u8 alpha;
1057
1058 if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
1059 return;
1060
1061 WARN_ON(rtt < 0);
1062
1063 if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
1064 IWL_DEBUG_INFO(mvm,
1065 ": %pM: ignore failed measurement. Status=%u\n",
1066 res->addr, res->status);
1067 return;
1068 }
1069
1070 list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
1071 if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
1072 resp = iter;
1073 break;
1074 }
1075 }
1076
1077 if (!resp) {
1078 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1079 if (!resp)
1080 return;
1081
1082 memcpy(resp->addr, res->addr, ETH_ALEN);
1083 list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
1084
1085 resp->rtt_avg = rtt;
1086
1087 IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
1088 resp->addr, resp->rtt_avg);
1089 goto update_time;
1090 }
1091
1092 if (res->host_time - resp->host_time >
1093 IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
1094 resp->rtt_avg = rtt;
1095
1096 IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
1097 resp->addr, resp->rtt_avg);
1098 goto update_time;
1099 }
1100
1101 /* Smooth the results based on the tracked RTT average */
1102 undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
1103 overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
1104 alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
1105
1106 rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
1107
1108 IWL_DEBUG_INFO(mvm,
1109 "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
1110 resp->addr, resp->rtt_avg, rtt_avg, rtt);
1111
1112 /*
1113 * update the responder's average RTT results regardless of
1114 * the under/over shoot logic below
1115 */
1116 resp->rtt_avg = rtt_avg;
1117
1118 /* smooth the results */
1119 if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
1120 res->ftm.rtt_avg = rtt_avg;
1121
1122 IWL_DEBUG_INFO(mvm,
1123 "undershoot: val=%lld\n",
1124 (rtt_avg - rtt));
1125 } else if (rtt_avg < rtt && (rtt - rtt_avg) >
1126 overshoot) {
1127 res->ftm.rtt_avg = rtt_avg;
1128 IWL_DEBUG_INFO(mvm,
1129 "overshoot: val=%lld\n",
1130 (rtt - rtt_avg));
1131 }
1132
1133 update_time:
1134 resp->host_time = res->host_time;
1135 }
1136
1137 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
1138 struct cfg80211_pmsr_result *res)
1139 {
1140 s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
1141
1142 IWL_DEBUG_INFO(mvm, "entry %d\n", index);
1143 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
1144 IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
1145 IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
1146 IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
1147 IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
1148 IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
1149 IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
1150 IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
1151 IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
1152 IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
1153 IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
1154 }
1155
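/*
 * Save the PN values reported by the firmware so they can be used for the
 * next secured ranging request to this responder.
 */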
1156 static void
1157 iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
1158 struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
1159 {
1160 struct iwl_mvm_ftm_pasn_entry *entry;
1161
1162 lockdep_assert_held(&mvm->mutex);
1163
1164 list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
1165 if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
1166 continue;
1167
1168 memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
1169 memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
1170 return;
1171 }
1172 }
1173
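/* Determine which version of the range response notification the firmware uses. */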
1174 static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
1175 {
1176 if (!fw_has_api(&mvm->fw->ucode_capa,
1177 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
1178 return 5;
1179
1180 /* Starting from version 8, the FW advertises the version */
1181 if (mvm->cmd_ver.range_resp >= 8)
1182 return mvm->cmd_ver.range_resp;
1183 else if (fw_has_api(&mvm->fw->ucode_capa,
1184 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1185 return 7;
1186
1187 /* The first version of the new range request API */
1188 return 6;
1189 }
1190
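/* Check that the notification payload size matches the given response version. */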
1191 static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
1192 {
1193 switch (ver) {
1194 case 9:
1195 case 8:
1196 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
1197 case 7:
1198 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
1199 case 6:
1200 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
1201 case 5:
1202 return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
1203 default:
1204 WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
1205 return false;
1206 }
1207 }
1208
1209 void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1210 {
1211 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1212 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1213 struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
1214 struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
1215 struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
1216 struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
1217 int i;
1218 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
1219 IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
1220 u8 num_of_aps, last_in_batch;
1221 u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);
1222
1223 lockdep_assert_held(&mvm->mutex);
1224
1225 if (!mvm->ftm_initiator.req) {
1226 return;
1227 }
1228
1229 if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
1230 return;
1231
1232 if (new_api) {
1233 if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
1234 fw_resp_v8->num_of_aps))
1235 return;
1236
1237 num_of_aps = fw_resp_v8->num_of_aps;
1238 last_in_batch = fw_resp_v8->last_report;
1239 } else {
1240 if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
1241 fw_resp_v5->num_of_aps))
1242 return;
1243
1244 num_of_aps = fw_resp_v5->num_of_aps;
1245 last_in_batch = fw_resp_v5->last_in_batch;
1246 }
1247
1248 IWL_DEBUG_INFO(mvm, "Range response received\n");
1249 IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
1250 mvm->ftm_initiator.req->cookie, num_of_aps);
1251
1252 for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
1253 struct cfg80211_pmsr_result result = {};
1254 struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
1255 int peer_idx;
1256
1257 if (new_api) {
1258 if (notif_ver >= 8) {
1259 fw_ap = &fw_resp_v8->ap[i];
1260 iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
1261 } else if (notif_ver == 7) {
1262 fw_ap = (void *)&fw_resp_v7->ap[i];
1263 } else {
1264 fw_ap = (void *)&fw_resp_v6->ap[i];
1265 }
1266
1267 result.final = fw_ap->last_burst;
1268 result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
1269 result.ap_tsf_valid = 1;
1270 } else {
1271 /* the first part is the same for old and new APIs */
1272 fw_ap = (void *)&fw_resp_v5->ap[i];
1273 /*
1274 * FIXME: the firmware needs to report this, we don't
1275 * even know the number of bursts the responder picked
1276 * (if we asked it to)
1277 */
1278 result.final = 0;
1279 }
1280
1281 peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
1282 fw_ap->bssid);
1283 if (peer_idx < 0) {
1284 IWL_WARN(mvm,
1285 "Unknown address (%pM, target #%d) in FTM response\n",
1286 fw_ap->bssid, i);
1287 continue;
1288 }
1289
1290 switch (fw_ap->measure_status) {
1291 case IWL_TOF_ENTRY_SUCCESS:
1292 result.status = NL80211_PMSR_STATUS_SUCCESS;
1293 break;
1294 case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
1295 result.status = NL80211_PMSR_STATUS_TIMEOUT;
1296 break;
1297 case IWL_TOF_ENTRY_NO_RESPONSE:
1298 result.status = NL80211_PMSR_STATUS_FAILURE;
1299 result.ftm.failure_reason =
1300 NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
1301 break;
1302 case IWL_TOF_ENTRY_REQUEST_REJECTED:
1303 result.status = NL80211_PMSR_STATUS_FAILURE;
1304 result.ftm.failure_reason =
1305 NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
1306 result.ftm.busy_retry_time = fw_ap->refusal_period;
1307 break;
1308 default:
1309 result.status = NL80211_PMSR_STATUS_FAILURE;
1310 result.ftm.failure_reason =
1311 NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
1312 break;
1313 }
1314 memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
1315 result.host_time = iwl_mvm_ftm_get_host_time(mvm,
1316 fw_ap->timestamp);
1317 result.type = NL80211_PMSR_TYPE_FTM;
1318 result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
1319 mvm->ftm_initiator.responses[peer_idx]++;
1320 result.ftm.rssi_avg = fw_ap->rssi;
1321 result.ftm.rssi_avg_valid = 1;
1322 result.ftm.rssi_spread = fw_ap->rssi_spread;
1323 result.ftm.rssi_spread_valid = 1;
1324 result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
1325 result.ftm.rtt_avg_valid = 1;
1326 result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
1327 result.ftm.rtt_variance_valid = 1;
1328 result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
1329 result.ftm.rtt_spread_valid = 1;
1330
1331 iwl_mvm_ftm_get_lci_civic(mvm, &result);
1332
1333 iwl_mvm_ftm_rtt_smoothing(mvm, &result);
1334
1335 cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
1336 mvm->ftm_initiator.req,
1337 &result, GFP_KERNEL);
1338
1339 if (fw_has_api(&mvm->fw->ucode_capa,
1340 IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1341 IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
1342 fw_ap->rttConfidence);
1343
1344 iwl_mvm_debug_range_resp(mvm, i, &result);
1345 }
1346
1347 if (last_in_batch) {
1348 cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
1349 mvm->ftm_initiator.req,
1350 GFP_KERNEL);
1351 iwl_mvm_ftm_reset(mvm);
1352 }
1353 }
1354
1355 void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1356 {
1357 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1358 const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
1359 size_t len = iwl_rx_packet_payload_len(pkt);
1360 struct iwl_mvm_loc_entry *entry;
1361 const u8 *ies, *lci, *civic, *msr_ie;
1362 size_t ies_len, lci_len = 0, civic_len = 0;
1363 size_t baselen = IEEE80211_MIN_ACTION_SIZE +
1364 sizeof(mgmt->u.action.u.ftm);
1365 static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
1366 static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
1367
1368 if (len <= baselen)
1369 return;
1370
1371 lockdep_assert_held(&mvm->mutex);
1372
1373 ies = mgmt->u.action.u.ftm.variable;
1374 ies_len = len - baselen;
1375
1376 msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1377 &rprt_type_lci, 1, 4);
1378 if (msr_ie) {
1379 lci = msr_ie + 2;
1380 lci_len = msr_ie[1];
1381 }
1382
1383 msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1384 &rprt_type_civic, 1, 4);
1385 if (msr_ie) {
1386 civic = msr_ie + 2;
1387 civic_len = msr_ie[1];
1388 }
1389
1390 entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
1391 if (!entry)
1392 return;
1393
1394 memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
1395
1396 entry->lci_len = lci_len;
1397 if (lci_len)
1398 memcpy(entry->buf, lci, lci_len);
1399
1400 entry->civic_len = civic_len;
1401 if (civic_len)
1402 memcpy(entry->buf + lci_len, civic, civic_len);
1403
1404 list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
1405 }
1406