1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2015-2017 Intel Deutschland GmbH
4  * Copyright (C) 2018-2022 Intel Corporation
5  */
6 #include <linux/etherdevice.h>
7 #include <linux/math64.h>
8 #include <net/cfg80211.h>
9 #include "mvm.h"
10 #include "iwl-io.h"
11 #include "iwl-prph.h"
12 #include "constants.h"
13 
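/*
 * LCI/civic location data received from a responder; buf[] holds the LCI
 * report followed by the civic report.
 */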
14 struct iwl_mvm_loc_entry {
15 	struct list_head list;
16 	u8 addr[ETH_ALEN];
17 	u8 lci_len, civic_len;
18 	u8 buf[];
19 };
20 
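/* per-responder RTT smoothing state, see iwl_mvm_ftm_rtt_smoothing() */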
21 struct iwl_mvm_smooth_entry {
22 	struct list_head list;
23 	u8 addr[ETH_ALEN];
24 	s64 rtt_avg;
25 	u64 host_time;
26 };
27 
28 enum iwl_mvm_pasn_flags {
29 	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
30 };
31 
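/*
 * Security material for a secured ranging (PASN) responder,
 * added via iwl_mvm_ftm_add_pasn_sta().
 */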
32 struct iwl_mvm_ftm_pasn_entry {
33 	struct list_head list;
34 	u8 addr[ETH_ALEN];
35 	u8 hltk[HLTK_11AZ_LEN];
36 	u8 tk[TK_11AZ_LEN];
37 	u8 cipher;
38 	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
39 	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
40 	u32 flags;
41 };
42 
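/*
 * Add security material for a PASN responder: validate the TK/HLTK lengths
 * for the given cipher (no TK is expected if we are already associated to
 * this responder with an MFP station) and store them for later use in
 * secured ranging requests.
 */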
43 int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
44 			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
45 			     u8 *hltk, u32 hltk_len)
46 {
47 	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
48 						      GFP_KERNEL);
49 	u32 expected_tk_len;
50 
51 	lockdep_assert_held(&mvm->mutex);
52 
53 	if (!pasn)
54 		return -ENOBUFS;
55 
56 	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
57 
58 	switch (pasn->cipher) {
59 	case IWL_LOCATION_CIPHER_CCMP_128:
60 	case IWL_LOCATION_CIPHER_GCMP_128:
61 		expected_tk_len = WLAN_KEY_LEN_CCMP;
62 		break;
63 	case IWL_LOCATION_CIPHER_GCMP_256:
64 		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
65 		break;
66 	default:
67 		goto out;
68 	}
69 
	/*
	 * If we are associated to this AP and already have a security
	 * context, the TK is already configured for this station, so it
	 * shouldn't be set again here.
	 */
75 	if (vif->cfg.assoc &&
76 	    !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
77 		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
78 		struct ieee80211_sta *sta;
79 
80 		rcu_read_lock();
81 		sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]);
82 		if (!IS_ERR_OR_NULL(sta) && sta->mfp)
83 			expected_tk_len = 0;
84 		rcu_read_unlock();
85 	}
86 
87 	if (tk_len != expected_tk_len ||
88 	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
89 		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
90 			tk_len, hltk_len);
91 		goto out;
92 	}
93 
94 	if (!expected_tk_len && !hltk_len) {
95 		IWL_ERR(mvm, "TK and HLTK not set\n");
96 		goto out;
97 	}
98 
99 	memcpy(pasn->addr, addr, sizeof(pasn->addr));
100 
101 	if (hltk_len) {
102 		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
103 		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
104 	}
105 
	if (tk && tk_len)
		memcpy(pasn->tk, tk, tk_len);
108 
109 	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
110 	return 0;
111 out:
112 	kfree(pasn);
113 	return -EINVAL;
114 }
115 
116 void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
117 {
118 	struct iwl_mvm_ftm_pasn_entry *entry, *prev;
119 
120 	lockdep_assert_held(&mvm->mutex);
121 
122 	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
123 				 list) {
124 		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
125 			continue;
126 
127 		list_del(&entry->list);
128 		kfree(entry);
129 		return;
130 	}
131 }
132 
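/* forget the active request and free all cached LCI/civic entries */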
133 static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
134 {
135 	struct iwl_mvm_loc_entry *e, *t;
136 
137 	mvm->ftm_initiator.req = NULL;
138 	mvm->ftm_initiator.req_wdev = NULL;
139 	memset(mvm->ftm_initiator.responses, 0,
140 	       sizeof(mvm->ftm_initiator.responses));
141 
142 	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
143 		list_del(&e->list);
144 		kfree(e);
145 	}
146 }
147 
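/*
 * Fail the pending request, if any: report a failure for every peer and
 * complete the request towards cfg80211, e.g. when the firmware state is
 * lost across a restart.
 */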
148 void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
149 {
150 	struct cfg80211_pmsr_result result = {
151 		.status = NL80211_PMSR_STATUS_FAILURE,
152 		.final = 1,
153 		.host_time = ktime_get_boottime_ns(),
154 		.type = NL80211_PMSR_TYPE_FTM,
155 	};
156 	int i;
157 
158 	lockdep_assert_held(&mvm->mutex);
159 
160 	if (!mvm->ftm_initiator.req)
161 		return;
162 
163 	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
164 		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
165 		       ETH_ALEN);
166 		result.ftm.burst_index = mvm->ftm_initiator.responses[i];
167 
168 		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
169 				     mvm->ftm_initiator.req,
170 				     &result, GFP_KERNEL);
171 	}
172 
173 	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
174 			       mvm->ftm_initiator.req, GFP_KERNEL);
175 	iwl_mvm_ftm_reset(mvm);
176 }
177 
178 void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
179 {
180 	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
181 
182 	IWL_DEBUG_INFO(mvm,
183 		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
184 			IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
185 			IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
186 			IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
187 			IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
188 			IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
189 }
190 
191 void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
192 {
193 	struct iwl_mvm_smooth_entry *se, *st;
194 
195 	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
196 				 list) {
197 		list_del(&se->list);
198 		kfree(se);
199 	}
200 }
201 
202 static int
203 iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
204 {
205 	switch (s) {
206 	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
207 		return 0;
208 	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
209 		return -EBUSY;
210 	default:
211 		WARN_ON_ONCE(1);
212 		return -EIO;
213 	}
214 }
215 
216 static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
217 			       struct iwl_tof_range_req_cmd_v5 *cmd,
218 			       struct cfg80211_pmsr_request *req)
219 {
220 	int i;
221 
222 	cmd->request_id = req->cookie;
223 	cmd->num_of_ap = req->n_peers;
224 
225 	/* use maximum for "no timeout" or bigger than what we can do */
226 	if (!req->timeout || req->timeout > 255 * 100)
227 		cmd->req_timeout = 255;
228 	else
229 		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
230 
	/*
	 * We always treat the address as random, since otherwise we
	 * would have filled in our local address there instead.
	 */
235 	cmd->macaddr_random = 1;
236 	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
237 	for (i = 0; i < ETH_ALEN; i++)
238 		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
239 
240 	if (vif->cfg.assoc)
241 		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
242 	else
243 		eth_broadcast_addr(cmd->range_req_bssid);
244 }
245 
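/*
 * Fill the parts of the range request command common to the v8 and later
 * command layouts: request id, number of targets, timeout, randomized MAC
 * address template/mask and, when associated, the BSSID and the MAC id used
 * for reporting the AP's TSF.
 */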
246 static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
247 				   struct ieee80211_vif *vif,
248 				   struct iwl_tof_range_req_cmd_v9 *cmd,
249 				   struct cfg80211_pmsr_request *req)
250 {
251 	int i;
252 
253 	cmd->initiator_flags =
254 		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
255 			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
256 	cmd->request_id = req->cookie;
257 	cmd->num_of_ap = req->n_peers;
258 
259 	/*
260 	 * Use a large value for "no timeout". Don't use the maximum value
261 	 * because of fw limitations.
262 	 */
263 	if (req->timeout)
264 		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
265 	else
266 		cmd->req_timeout_ms = cpu_to_le32(0xfffff);
267 
268 	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
269 	for (i = 0; i < ETH_ALEN; i++)
270 		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
271 
272 	if (vif->cfg.assoc) {
273 		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
274 
275 		/* AP's TSF is only relevant if associated */
276 		for (i = 0; i < req->n_peers; i++) {
277 			if (req->peers[i].report_ap_tsf) {
278 				struct iwl_mvm_vif *mvmvif =
279 					iwl_mvm_vif_from_mac80211(vif);
280 
281 				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
282 				return;
283 			}
284 		}
285 	} else {
286 		eth_broadcast_addr(cmd->range_req_bssid);
287 	}
288 
289 	/* Don't report AP's TSF */
290 	cmd->tsf_mac_id = cpu_to_le32(0xff);
291 }
292 
293 static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
294 			       struct iwl_tof_range_req_cmd_v8 *cmd,
295 			       struct cfg80211_pmsr_request *req)
296 {
297 	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
298 }
299 
300 static int
301 iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
302 			      struct cfg80211_pmsr_request_peer *peer,
303 			      u8 *channel, u8 *bandwidth,
304 			      u8 *ctrl_ch_position)
305 {
306 	u32 freq = peer->chandef.chan->center_freq;
307 
308 	*channel = ieee80211_frequency_to_channel(freq);
309 
310 	switch (peer->chandef.width) {
311 	case NL80211_CHAN_WIDTH_20_NOHT:
312 		*bandwidth = IWL_TOF_BW_20_LEGACY;
313 		break;
314 	case NL80211_CHAN_WIDTH_20:
315 		*bandwidth = IWL_TOF_BW_20_HT;
316 		break;
317 	case NL80211_CHAN_WIDTH_40:
318 		*bandwidth = IWL_TOF_BW_40;
319 		break;
320 	case NL80211_CHAN_WIDTH_80:
321 		*bandwidth = IWL_TOF_BW_80;
322 		break;
323 	default:
324 		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
325 			peer->chandef.width);
326 		return -EINVAL;
327 	}
328 
329 	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
330 		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
331 
332 	return 0;
333 }
334 
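/*
 * Same as iwl_mvm_ftm_target_chandef_v1(), but encodes the channel width as
 * a frame format + bandwidth pair.  160 MHz is only supported from command
 * version 13, and trigger-based / non-trigger-based (i.e. non-EDCA)
 * measurements always use the HE frame format.
 */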
335 static int
336 iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
337 			      struct cfg80211_pmsr_request_peer *peer,
338 			      u8 *channel, u8 *format_bw,
339 			      u8 *ctrl_ch_position)
340 {
341 	u32 freq = peer->chandef.chan->center_freq;
342 	u8 cmd_ver;
343 
344 	*channel = ieee80211_frequency_to_channel(freq);
345 
346 	switch (peer->chandef.width) {
347 	case NL80211_CHAN_WIDTH_20_NOHT:
348 		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
349 		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
350 		break;
351 	case NL80211_CHAN_WIDTH_20:
352 		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
353 		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
354 		break;
355 	case NL80211_CHAN_WIDTH_40:
356 		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
357 		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
358 		break;
359 	case NL80211_CHAN_WIDTH_80:
360 		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
361 		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
362 		break;
363 	case NL80211_CHAN_WIDTH_160:
364 		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
365 						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
366 						IWL_FW_CMD_VER_UNKNOWN);
367 
368 		if (cmd_ver >= 13) {
369 			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
370 			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
371 			break;
372 		}
373 		fallthrough;
374 	default:
375 		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
376 			peer->chandef.width);
377 		return -EINVAL;
378 	}
379 
	/* non-EDCA based measurements must use the HE preamble */
381 	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
382 		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
383 
384 	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
385 		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
386 
387 	return 0;
388 }
389 
390 static int
391 iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
392 			  struct cfg80211_pmsr_request_peer *peer,
393 			  struct iwl_tof_range_req_ap_entry_v2 *target)
394 {
395 	int ret;
396 
397 	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
398 					    &target->bandwidth,
399 					    &target->ctrl_ch_position);
400 	if (ret)
401 		return ret;
402 
403 	memcpy(target->bssid, peer->addr, ETH_ALEN);
404 	target->burst_period =
405 		cpu_to_le16(peer->ftm.burst_period);
406 	target->samples_per_burst = peer->ftm.ftms_per_burst;
407 	target->num_of_bursts = peer->ftm.num_bursts_exp;
408 	target->measure_type = 0; /* regular two-sided FTM */
409 	target->retries_per_sample = peer->ftm.ftmr_retries;
410 	target->asap_mode = peer->ftm.asap;
411 	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
412 
413 	if (peer->ftm.request_lci)
414 		target->location_req |= IWL_TOF_LOC_LCI;
415 	if (peer->ftm.request_civicloc)
416 		target->location_req |= IWL_TOF_LOC_CIVIC;
417 
418 	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
419 
420 	return 0;
421 }
422 
423 #define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
424 				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
425 
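/*
 * Fill the target fields common to all the AP entry versions handled below:
 * BSSID, burst parameters, FTMR retries and the initiator flags derived from
 * the peer request and from the driver configuration (dynamic ACK, algorithm
 * type, trigger-based/non-trigger-based ranging and LMR feedback).
 */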
426 static void
427 iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
428 			      struct cfg80211_pmsr_request_peer *peer,
429 			      struct iwl_tof_range_req_ap_entry_v6 *target)
430 {
431 	memcpy(target->bssid, peer->addr, ETH_ALEN);
432 	target->burst_period =
433 		cpu_to_le16(peer->ftm.burst_period);
434 	target->samples_per_burst = peer->ftm.ftms_per_burst;
435 	target->num_of_bursts = peer->ftm.num_bursts_exp;
436 	target->ftmr_max_retries = peer->ftm.ftmr_retries;
437 	target->initiator_ap_flags = cpu_to_le32(0);
438 
439 	if (peer->ftm.asap)
440 		FTM_PUT_FLAG(ASAP);
441 
442 	if (peer->ftm.request_lci)
443 		FTM_PUT_FLAG(LCI_REQUEST);
444 
445 	if (peer->ftm.request_civicloc)
446 		FTM_PUT_FLAG(CIVIC_REQUEST);
447 
448 	if (IWL_MVM_FTM_INITIATOR_DYNACK)
449 		FTM_PUT_FLAG(DYN_ACK);
450 
451 	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
452 		FTM_PUT_FLAG(ALGO_LR);
453 	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
454 		FTM_PUT_FLAG(ALGO_FFT);
455 
456 	if (peer->ftm.trigger_based)
457 		FTM_PUT_FLAG(TB);
458 	else if (peer->ftm.non_trigger_based)
459 		FTM_PUT_FLAG(NON_TB);
460 
461 	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
462 	    peer->ftm.lmr_feedback)
463 		FTM_PUT_FLAG(LMR_FEEDBACK);
464 }
465 
466 static int
467 iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
468 			  struct cfg80211_pmsr_request_peer *peer,
469 			  struct iwl_tof_range_req_ap_entry_v3 *target)
470 {
471 	int ret;
472 
473 	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
474 					    &target->bandwidth,
475 					    &target->ctrl_ch_position);
476 	if (ret)
477 		return ret;
478 
	/*
	 * Versions 3 and 4 share some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
	 */
483 	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
484 
485 	return 0;
486 }
487 
488 static int
489 iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
490 			  struct cfg80211_pmsr_request_peer *peer,
491 			  struct iwl_tof_range_req_ap_entry_v4 *target)
492 {
493 	int ret;
494 
495 	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
496 					    &target->format_bw,
497 					    &target->ctrl_ch_position);
498 	if (ret)
499 		return ret;
500 
501 	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
502 
503 	return 0;
504 }
505 
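/*
 * In addition to the common fields, v6+ targets need the station id (when
 * ranging with the AP we are associated to), the PMF flag for trigger-based
 * or non-trigger-based ranging with an MFP station, and a beacon interval
 * (currently hard-coded to 100 TUs).
 */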
506 static int
507 iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
508 		       struct cfg80211_pmsr_request_peer *peer,
509 		       struct iwl_tof_range_req_ap_entry_v6 *target)
510 {
511 	int ret;
512 
513 	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
514 					    &target->format_bw,
515 					    &target->ctrl_ch_position);
516 	if (ret)
517 		return ret;
518 
519 	iwl_mvm_ftm_put_target_common(mvm, peer, target);
520 
521 	if (vif->cfg.assoc &&
522 	    !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
523 		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
524 		struct ieee80211_sta *sta;
525 
526 		rcu_read_lock();
527 
528 		sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]);
529 		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
530 			rcu_read_unlock();
531 			return PTR_ERR_OR_ZERO(sta);
532 		}
533 
534 		if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
535 			FTM_PUT_FLAG(PMF);
536 
537 		rcu_read_unlock();
538 
539 		target->sta_id = mvmvif->deflink.ap_sta_id;
540 	} else {
541 		target->sta_id = IWL_MVM_INVALID_STA;
542 	}
543 
544 	/*
545 	 * TODO: Beacon interval is currently unknown, so use the common value
546 	 * of 100 TUs.
547 	 */
548 	target->beacon_interval = cpu_to_le16(100);
549 	return 0;
550 }
551 
552 static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
553 {
554 	u32 status;
555 	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
556 
557 	if (!err && status) {
558 		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
559 			status);
560 		err = iwl_ftm_range_request_status_to_err(status);
561 	}
562 
563 	return err;
564 }
565 
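/*
 * Each iwl_mvm_ftm_start_vX() helper below builds the TOF_RANGE_REQ_CMD in
 * the layout of command version X and sends it to the firmware;
 * iwl_mvm_ftm_start() selects the appropriate one based on the firmware
 * capabilities.
 */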
566 static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
567 				struct cfg80211_pmsr_request *req)
568 {
569 	struct iwl_tof_range_req_cmd_v5 cmd_v5;
570 	struct iwl_host_cmd hcmd = {
571 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
572 		.dataflags[0] = IWL_HCMD_DFL_DUP,
573 		.data[0] = &cmd_v5,
574 		.len[0] = sizeof(cmd_v5),
575 	};
576 	u8 i;
577 	int err;
578 
579 	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
580 
581 	for (i = 0; i < cmd_v5.num_of_ap; i++) {
582 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
583 
584 		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
585 		if (err)
586 			return err;
587 	}
588 
589 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
590 }
591 
592 static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
593 				struct cfg80211_pmsr_request *req)
594 {
595 	struct iwl_tof_range_req_cmd_v7 cmd_v7;
596 	struct iwl_host_cmd hcmd = {
597 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
598 		.dataflags[0] = IWL_HCMD_DFL_DUP,
599 		.data[0] = &cmd_v7,
600 		.len[0] = sizeof(cmd_v7),
601 	};
602 	u8 i;
603 	int err;
604 
	/*
	 * Versions 7 and 8 have the same structure except for the responders
	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
	 */
609 	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
610 
611 	for (i = 0; i < cmd_v7.num_of_ap; i++) {
612 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
613 
614 		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
615 		if (err)
616 			return err;
617 	}
618 
619 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
620 }
621 
622 static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
623 				struct cfg80211_pmsr_request *req)
624 {
625 	struct iwl_tof_range_req_cmd_v8 cmd;
626 	struct iwl_host_cmd hcmd = {
627 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
628 		.dataflags[0] = IWL_HCMD_DFL_DUP,
629 		.data[0] = &cmd,
630 		.len[0] = sizeof(cmd),
631 	};
632 	u8 i;
633 	int err;
634 
635 	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
636 
637 	for (i = 0; i < cmd.num_of_ap; i++) {
638 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
639 
640 		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
641 		if (err)
642 			return err;
643 	}
644 
645 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
646 }
647 
648 static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
649 				struct cfg80211_pmsr_request *req)
650 {
651 	struct iwl_tof_range_req_cmd_v9 cmd;
652 	struct iwl_host_cmd hcmd = {
653 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
654 		.dataflags[0] = IWL_HCMD_DFL_DUP,
655 		.data[0] = &cmd,
656 		.len[0] = sizeof(cmd),
657 	};
658 	u8 i;
659 	int err;
660 
661 	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
662 
663 	for (i = 0; i < cmd.num_of_ap; i++) {
664 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
665 		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
666 
667 		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
668 		if (err)
669 			return err;
670 	}
671 
672 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
673 }
674 
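/*
 * mac80211 key iterator for secured ranging with the associated AP: copy the
 * TK of the station matching the target's BSSID into the target entry and
 * record the matching location cipher.
 */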
675 static void iter(struct ieee80211_hw *hw,
676 		 struct ieee80211_vif *vif,
677 		 struct ieee80211_sta *sta,
678 		 struct ieee80211_key_conf *key,
679 		 void *data)
680 {
681 	struct iwl_tof_range_req_ap_entry_v6 *target = data;
682 
683 	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
684 		return;
685 
686 	WARN_ON(!sta->mfp);
687 
688 	if (WARN_ON(key->keylen > sizeof(target->tk)))
689 		return;
690 
691 	memcpy(target->tk, key->key, key->keylen);
692 	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
693 	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
694 }
695 
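/*
 * For trigger-based or non-trigger-based targets that have a matching PASN
 * entry, install the cipher, HLTK (if any), TK and PN counters in the target
 * and mark it as secured.  If the target is the AP we are associated to, the
 * TK is taken from the mac80211 key instead of the PASN entry.
 */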
696 static void
697 iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
698 				struct iwl_tof_range_req_ap_entry_v7 *target)
699 {
700 	struct iwl_mvm_ftm_pasn_entry *entry;
701 	u32 flags = le32_to_cpu(target->initiator_ap_flags);
702 
703 	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
704 		       IWL_INITIATOR_AP_FLAGS_TB)))
705 		return;
706 
707 	lockdep_assert_held(&mvm->mutex);
708 
709 	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
710 		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
711 			continue;
712 
713 		target->cipher = entry->cipher;
714 
715 		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
716 			memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
717 		else
718 			memset(target->hltk, 0, sizeof(target->hltk));
719 
720 		if (vif->cfg.assoc &&
721 		    !memcmp(vif->bss_conf.bssid, target->bssid,
722 			    sizeof(target->bssid)))
723 			ieee80211_iter_keys(mvm->hw, vif, iter, target);
724 		else
725 			memcpy(target->tk, entry->tk, sizeof(target->tk));
726 
727 		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
728 		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
729 
730 		target->initiator_ap_flags |=
731 			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
732 		return;
733 	}
734 }
735 
736 static int
737 iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
738 			  struct cfg80211_pmsr_request_peer *peer,
739 			  struct iwl_tof_range_req_ap_entry_v7 *target)
740 {
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);

	if (err)
743 		return err;
744 
745 	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
746 	return err;
747 }
748 
749 static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
750 				 struct ieee80211_vif *vif,
751 				 struct cfg80211_pmsr_request *req)
752 {
753 	struct iwl_tof_range_req_cmd_v11 cmd;
754 	struct iwl_host_cmd hcmd = {
755 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
756 		.dataflags[0] = IWL_HCMD_DFL_DUP,
757 		.data[0] = &cmd,
758 		.len[0] = sizeof(cmd),
759 	};
760 	u8 i;
761 	int err;
762 
763 	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
764 
765 	for (i = 0; i < cmd.num_of_ap; i++) {
766 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
767 		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
768 
769 		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
770 		if (err)
771 			return err;
772 	}
773 
774 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
775 }
776 
777 static void
778 iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
779 			   struct iwl_tof_range_req_ap_entry_v8 *target)
780 {
	/* Only 2 STS are supported on Tx, so cap the configured value at 1 */
782 	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
783 		IWL_MVM_FTM_I2R_MAX_STS;
784 
785 	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
786 		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
787 	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
788 		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
789 	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
790 	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
791 }
792 
793 static int
794 iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
795 			  struct cfg80211_pmsr_request_peer *peer,
796 			  struct iwl_tof_range_req_ap_entry_v8 *target)
797 {
798 	u32 flags;
799 	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
800 
801 	if (ret)
802 		return ret;
803 
804 	iwl_mvm_ftm_set_ndp_params(mvm, target);
805 
	/*
	 * If secure LTF is turned off, replace the SECURED flag with PMF only
	 */
809 	flags = le32_to_cpu(target->initiator_ap_flags);
810 	if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
811 	    !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
812 		flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
813 		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
814 		target->initiator_ap_flags = cpu_to_le32(flags);
815 	}
816 
817 	return 0;
818 }
819 
820 static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
821 				 struct ieee80211_vif *vif,
822 				 struct cfg80211_pmsr_request *req)
823 {
824 	struct iwl_tof_range_req_cmd_v12 cmd;
825 	struct iwl_host_cmd hcmd = {
826 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
827 		.dataflags[0] = IWL_HCMD_DFL_DUP,
828 		.data[0] = &cmd,
829 		.len[0] = sizeof(cmd),
830 	};
831 	u8 i;
832 	int err;
833 
834 	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
835 
836 	for (i = 0; i < cmd.num_of_ap; i++) {
837 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
838 		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
839 
840 		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
841 		if (err)
842 			return err;
843 	}
844 
845 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
846 }
847 
848 static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
849 				 struct ieee80211_vif *vif,
850 				 struct cfg80211_pmsr_request *req)
851 {
852 	struct iwl_tof_range_req_cmd_v13 cmd;
853 	struct iwl_host_cmd hcmd = {
854 		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
855 		.dataflags[0] = IWL_HCMD_DFL_DUP,
856 		.data[0] = &cmd,
857 		.len[0] = sizeof(cmd),
858 	};
859 	u8 i;
860 	int err;
861 
862 	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
863 
864 	for (i = 0; i < cmd.num_of_ap; i++) {
865 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
866 		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
867 
868 		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
869 		if (err)
870 			return err;
871 
872 		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
873 			target->bss_color = peer->ftm.bss_color;
874 
875 		if (peer->ftm.non_trigger_based) {
876 			target->min_time_between_msr =
877 				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
878 			target->burst_period =
879 				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
880 		} else {
881 			target->min_time_between_msr = cpu_to_le16(0);
882 		}
883 
884 		target->band =
885 			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
886 	}
887 
888 	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
889 }
890 
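/*
 * Start a measurement request: reject it if another request is already in
 * flight, send the range request command in the version supported by the
 * firmware, and remember the request and wdev so the response notifications
 * can be reported against them.
 */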
891 int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
892 		      struct cfg80211_pmsr_request *req)
893 {
894 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
895 				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
896 	int err;
897 
898 	lockdep_assert_held(&mvm->mutex);
899 
900 	if (mvm->ftm_initiator.req)
901 		return -EBUSY;
902 
903 	if (new_api) {
904 		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
905 						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
906 						   IWL_FW_CMD_VER_UNKNOWN);
907 
908 		switch (cmd_ver) {
909 		case 13:
910 			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
911 			break;
912 		case 12:
913 			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
914 			break;
915 		case 11:
916 			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
917 			break;
918 		case 9:
919 		case 10:
920 			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
921 			break;
922 		case 8:
923 			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
924 			break;
925 		default:
926 			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
927 			break;
928 		}
929 	} else {
930 		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
931 	}
932 
933 	if (!err) {
934 		mvm->ftm_initiator.req = req;
935 		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
936 	}
937 
938 	return err;
939 }
940 
941 void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
942 {
943 	struct iwl_tof_range_abort_cmd cmd = {
944 		.request_id = req->cookie,
945 	};
946 
947 	lockdep_assert_held(&mvm->mutex);
948 
949 	if (req != mvm->ftm_initiator.req)
950 		return;
951 
952 	iwl_mvm_ftm_reset(mvm);
953 
954 	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
955 				 0, sizeof(cmd), &cmd))
956 		IWL_ERR(mvm, "failed to abort FTM process\n");
957 }
958 
959 static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
960 				 const u8 *addr)
961 {
962 	int i;
963 
964 	for (i = 0; i < req->n_peers; i++) {
965 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
966 
967 		if (ether_addr_equal_unaligned(peer->addr, addr))
968 			return i;
969 	}
970 
971 	return -ENOENT;
972 }
973 
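/*
 * Convert a firmware GP2 timestamp (a microsecond counter) to nanoseconds on
 * the host boottime clock, accounting for GP2 wrap-around.
 */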
974 static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
975 {
976 	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
977 	u32 curr_gp2, diff;
978 	u64 now_from_boot_ns;
979 
980 	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
981 			      &now_from_boot_ns, NULL);
982 
983 	if (curr_gp2 >= gp2_ts)
984 		diff = curr_gp2 - gp2_ts;
985 	else
986 		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
987 
988 	return now_from_boot_ns - (u64)diff * 1000;
989 }
990 
991 static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
992 				      struct cfg80211_pmsr_result *res)
993 {
994 	struct iwl_mvm_loc_entry *entry;
995 
996 	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
997 		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
998 			continue;
999 
1000 		if (entry->lci_len) {
1001 			res->ftm.lci_len = entry->lci_len;
1002 			res->ftm.lci = entry->buf;
1003 		}
1004 
1005 		if (entry->civic_len) {
1006 			res->ftm.civicloc_len = entry->civic_len;
1007 			res->ftm.civicloc = entry->buf + entry->lci_len;
1008 		}
1009 
1010 		/* we found the entry we needed */
1011 		break;
1012 	}
1013 }
1014 
1015 static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
1016 					u8 num_of_aps)
1017 {
1018 	lockdep_assert_held(&mvm->mutex);
1019 
1020 	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
1021 		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
1022 			request_id, (u8)mvm->ftm_initiator.req->cookie);
1023 		return -EINVAL;
1024 	}
1025 
1026 	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
1027 		IWL_ERR(mvm, "FTM range response invalid\n");
1028 		return -EINVAL;
1029 	}
1030 
1031 	return 0;
1032 }
1033 
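/*
 * Smooth the reported RTT with a per-responder running average:
 * avg = (alpha * rtt + (100 - alpha) * avg) / 100.  The smoothed value only
 * replaces the reported RTT when the raw sample under/overshoots the average
 * by more than the configured thresholds, and the tracked average is reset
 * if it is older than IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC.
 */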
1034 static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
1035 				      struct cfg80211_pmsr_result *res)
1036 {
1037 	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
1038 	s64 rtt_avg, rtt = res->ftm.rtt_avg;
1039 	u32 undershoot, overshoot;
1040 	u8 alpha;
1041 
1042 	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
1043 		return;
1044 
1045 	WARN_ON(rtt < 0);
1046 
1047 	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
1048 		IWL_DEBUG_INFO(mvm,
			       "%pM: ignore failed measurement. Status=%u\n",
1050 			       res->addr, res->status);
1051 		return;
1052 	}
1053 
1054 	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
1055 		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
1056 			resp = iter;
1057 			break;
1058 		}
1059 	}
1060 
1061 	if (!resp) {
1062 		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1063 		if (!resp)
1064 			return;
1065 
1066 		memcpy(resp->addr, res->addr, ETH_ALEN);
1067 		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
1068 
1069 		resp->rtt_avg = rtt;
1070 
1071 		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
1072 			       resp->addr, resp->rtt_avg);
1073 		goto update_time;
1074 	}
1075 
1076 	if (res->host_time - resp->host_time >
1077 	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
1078 		resp->rtt_avg = rtt;
1079 
1080 		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
1081 			       resp->addr, resp->rtt_avg);
1082 		goto update_time;
1083 	}
1084 
1085 	/* Smooth the results based on the tracked RTT average */
1086 	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
1087 	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
1088 	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
1089 
1090 	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
1091 
1092 	IWL_DEBUG_INFO(mvm,
1093 		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
1094 		       resp->addr, resp->rtt_avg, rtt_avg, rtt);
1095 
1096 	/*
1097 	 * update the responder's average RTT results regardless of
1098 	 * the under/over shoot logic below
1099 	 */
1100 	resp->rtt_avg = rtt_avg;
1101 
1102 	/* smooth the results */
1103 	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
1104 		res->ftm.rtt_avg = rtt_avg;
1105 
1106 		IWL_DEBUG_INFO(mvm,
1107 			       "undershoot: val=%lld\n",
1108 			       (rtt_avg - rtt));
1109 	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
1110 		   overshoot) {
1111 		res->ftm.rtt_avg = rtt_avg;
1112 		IWL_DEBUG_INFO(mvm,
1113 			       "overshoot: val=%lld\n",
1114 			       (rtt - rtt_avg));
1115 	}
1116 
1117 update_time:
1118 	resp->host_time = res->host_time;
1119 }
1120 
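/*
 * Dump a measurement result to the debug log.  rtt_avg is reported in
 * picoseconds, so rtt * 100 / 6666 gives roughly the one-way distance in cm
 * (rtt / 2 times the speed of light).
 */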
1121 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
1122 				     struct cfg80211_pmsr_result *res)
1123 {
1124 	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
1125 
1126 	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
1127 	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
1128 	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
1129 	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
1130 	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
1131 	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
1132 	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
1133 	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
1134 	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
1135 	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
1136 	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
1137 	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
1138 }
1139 
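/*
 * Save the PN counters reported by the firmware for a secured (PASN)
 * responder, so that the next request to it continues with fresh values.
 */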
1140 static void
1141 iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
1142 			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
1143 {
1144 	struct iwl_mvm_ftm_pasn_entry *entry;
1145 
1146 	lockdep_assert_held(&mvm->mutex);
1147 
1148 	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
1149 		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
1150 			continue;
1151 
1152 		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
1153 		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
1154 		return;
1155 	}
1156 }
1157 
1158 static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
1159 {
1160 	if (!fw_has_api(&mvm->fw->ucode_capa,
1161 			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
1162 		return 5;
1163 
1164 	/* Starting from version 8, the FW advertises the version */
1165 	if (mvm->cmd_ver.range_resp >= 8)
1166 		return mvm->cmd_ver.range_resp;
1167 	else if (fw_has_api(&mvm->fw->ucode_capa,
1168 			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1169 		return 7;
1170 
1171 	/* The first version of the new range request API */
1172 	return 6;
1173 }
1174 
1175 static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
1176 {
1177 	switch (ver) {
1178 	case 9:
1179 	case 8:
1180 		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
1181 	case 7:
1182 		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
1183 	case 6:
1184 		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
1185 	case 5:
1186 		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
1187 	default:
1188 		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
1189 		return false;
1190 	}
1191 }
1192 
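/*
 * Handle a range response notification: validate it against the active
 * request, translate every AP entry into a cfg80211_pmsr_result (attaching
 * cached LCI/civic data and applying RTT smoothing) and report it, then
 * complete the request when the last notification of the batch arrives.
 */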
1193 void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1194 {
1195 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1196 	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1197 	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
1198 	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
1199 	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
1200 	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
1201 	int i;
1202 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
1203 				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
1204 	u8 num_of_aps, last_in_batch;
1205 	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);
1206 
1207 	lockdep_assert_held(&mvm->mutex);
1208 
	if (!mvm->ftm_initiator.req)
		return;
1212 
1213 	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
1214 		return;
1215 
1216 	if (new_api) {
1217 		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
1218 						 fw_resp_v8->num_of_aps))
1219 			return;
1220 
1221 		num_of_aps = fw_resp_v8->num_of_aps;
1222 		last_in_batch = fw_resp_v8->last_report;
1223 	} else {
1224 		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
1225 						 fw_resp_v5->num_of_aps))
1226 			return;
1227 
1228 		num_of_aps = fw_resp_v5->num_of_aps;
1229 		last_in_batch = fw_resp_v5->last_in_batch;
1230 	}
1231 
1232 	IWL_DEBUG_INFO(mvm, "Range response received\n");
1233 	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
1234 		       mvm->ftm_initiator.req->cookie, num_of_aps);
1235 
1236 	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
1237 		struct cfg80211_pmsr_result result = {};
1238 		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
1239 		int peer_idx;
1240 
1241 		if (new_api) {
1242 			if (notif_ver >= 8) {
1243 				fw_ap = &fw_resp_v8->ap[i];
1244 				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
1245 			} else if (notif_ver == 7) {
1246 				fw_ap = (void *)&fw_resp_v7->ap[i];
1247 			} else {
1248 				fw_ap = (void *)&fw_resp_v6->ap[i];
1249 			}
1250 
1251 			result.final = fw_ap->last_burst;
1252 			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
1253 			result.ap_tsf_valid = 1;
1254 		} else {
1255 			/* the first part is the same for old and new APIs */
1256 			fw_ap = (void *)&fw_resp_v5->ap[i];
1257 			/*
1258 			 * FIXME: the firmware needs to report this, we don't
1259 			 * even know the number of bursts the responder picked
1260 			 * (if we asked it to)
1261 			 */
1262 			result.final = 0;
1263 		}
1264 
1265 		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
1266 						 fw_ap->bssid);
1267 		if (peer_idx < 0) {
1268 			IWL_WARN(mvm,
1269 				 "Unknown address (%pM, target #%d) in FTM response\n",
1270 				 fw_ap->bssid, i);
1271 			continue;
1272 		}
1273 
1274 		switch (fw_ap->measure_status) {
1275 		case IWL_TOF_ENTRY_SUCCESS:
1276 			result.status = NL80211_PMSR_STATUS_SUCCESS;
1277 			break;
1278 		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
1279 			result.status = NL80211_PMSR_STATUS_TIMEOUT;
1280 			break;
1281 		case IWL_TOF_ENTRY_NO_RESPONSE:
1282 			result.status = NL80211_PMSR_STATUS_FAILURE;
1283 			result.ftm.failure_reason =
1284 				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
1285 			break;
1286 		case IWL_TOF_ENTRY_REQUEST_REJECTED:
1287 			result.status = NL80211_PMSR_STATUS_FAILURE;
1288 			result.ftm.failure_reason =
1289 				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
1290 			result.ftm.busy_retry_time = fw_ap->refusal_period;
1291 			break;
1292 		default:
1293 			result.status = NL80211_PMSR_STATUS_FAILURE;
1294 			result.ftm.failure_reason =
1295 				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
1296 			break;
1297 		}
1298 		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
1299 		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
1300 							     fw_ap->timestamp);
1301 		result.type = NL80211_PMSR_TYPE_FTM;
1302 		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
1303 		mvm->ftm_initiator.responses[peer_idx]++;
1304 		result.ftm.rssi_avg = fw_ap->rssi;
1305 		result.ftm.rssi_avg_valid = 1;
1306 		result.ftm.rssi_spread = fw_ap->rssi_spread;
1307 		result.ftm.rssi_spread_valid = 1;
1308 		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
1309 		result.ftm.rtt_avg_valid = 1;
1310 		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
1311 		result.ftm.rtt_variance_valid = 1;
1312 		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
1313 		result.ftm.rtt_spread_valid = 1;
1314 
1315 		iwl_mvm_ftm_get_lci_civic(mvm, &result);
1316 
1317 		iwl_mvm_ftm_rtt_smoothing(mvm, &result);
1318 
1319 		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
1320 				     mvm->ftm_initiator.req,
1321 				     &result, GFP_KERNEL);
1322 
1323 		if (fw_has_api(&mvm->fw->ucode_capa,
1324 			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1325 			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
1326 				       fw_ap->rttConfidence);
1327 
1328 		iwl_mvm_debug_range_resp(mvm, i, &result);
1329 	}
1330 
1331 	if (last_in_batch) {
1332 		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
1333 				       mvm->ftm_initiator.req,
1334 				       GFP_KERNEL);
1335 		iwl_mvm_ftm_reset(mvm);
1336 	}
1337 }
1338 
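/*
 * Handle a location notification carrying the responder's measurement report
 * action frame: extract the LCI and civic measurement report elements and
 * cache them for the following range responses.
 */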
1339 void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1340 {
1341 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1342 	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
1343 	size_t len = iwl_rx_packet_payload_len(pkt);
1344 	struct iwl_mvm_loc_entry *entry;
1345 	const u8 *ies, *lci, *civic, *msr_ie;
1346 	size_t ies_len, lci_len = 0, civic_len = 0;
1347 	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
1348 			 sizeof(mgmt->u.action.u.ftm);
1349 	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
1350 	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
1351 
1352 	if (len <= baselen)
1353 		return;
1354 
1355 	lockdep_assert_held(&mvm->mutex);
1356 
1357 	ies = mgmt->u.action.u.ftm.variable;
1358 	ies_len = len - baselen;
1359 
1360 	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1361 					&rprt_type_lci, 1, 4);
1362 	if (msr_ie) {
1363 		lci = msr_ie + 2;
1364 		lci_len = msr_ie[1];
1365 	}
1366 
1367 	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1368 					&rprt_type_civic, 1, 4);
1369 	if (msr_ie) {
1370 		civic = msr_ie + 2;
1371 		civic_len = msr_ie[1];
1372 	}
1373 
1374 	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
1375 	if (!entry)
1376 		return;
1377 
1378 	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
1379 
1380 	entry->lci_len = lci_len;
1381 	if (lci_len)
1382 		memcpy(entry->buf, lci, lci_len);
1383 
1384 	entry->civic_len = civic_len;
1385 	if (civic_len)
1386 		memcpy(entry->buf + lci_len, civic, civic_len);
1387 
1388 	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
1389 }
1390