// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2021 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
};

int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
			     u8 *hltk, u32 hltk_len)
{
	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
						      GFP_KERNEL);
	u32 expected_tk_len;

	lockdep_assert_held(&mvm->mutex);

	if (!pasn)
		return -ENOBUFS;

	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);

	switch (pasn->cipher) {
	case IWL_LOCATION_CIPHER_CCMP_128:
	case IWL_LOCATION_CIPHER_GCMP_128:
		expected_tk_len = WLAN_KEY_LEN_CCMP;
		break;
	case IWL_LOCATION_CIPHER_GCMP_256:
		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
		break;
	default:
		goto out;
	}

	/*
	 * If associated to this AP and already have security context,
	 * the TK is already configured for this station, so it
	 * shouldn't be set again here.
	 */
	if (vif->bss_conf.assoc &&
	    !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
		if (!IS_ERR_OR_NULL(sta) && sta->mfp)
			expected_tk_len = 0;
		rcu_read_unlock();
	}

	if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) {
		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
			tk_len, hltk_len);
		goto out;
	}

	memcpy(pasn->addr, addr, sizeof(pasn->addr));
	memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));

	if (tk && tk_len)
		memcpy(pasn->tk, tk, sizeof(pasn->tk));

	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
	return 0;
out:
	kfree(pasn);
	return -EINVAL;
}

void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
{
	struct iwl_mvm_ftm_pasn_entry *entry, *prev;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
				 list) {
		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
			continue;

		list_del(&entry->list);
		kfree(entry);
		return;
	}
}

static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}
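
/*
 * Report a failed, final result for every peer of the outstanding request,
 * complete the request towards cfg80211 and clear the initiator state. Used
 * when an in-flight request can no longer be served, e.g. across a firmware
 * restart.
 */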
void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use the maximum for "no timeout", or for a timeout bigger than
	 * what we can do.
	 */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * We treat it always as random, since if not we'll
	 * have filled our local address there instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->bss_conf.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}
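
/*
 * Fill the fields of the range request command that are common to command
 * versions 8 and above: initiator flags, timeout, randomized MAC address
 * template/mask, BSSID and the MAC id used for reporting the AP's TSF.
 */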
static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd_v9 *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->bss_conf.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}
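
/*
 * Translate the peer's chandef into the target channel description used by
 * newer AP entry versions, where frame format and bandwidth are packed into
 * a single format_bw field. 160 MHz is only accepted when the firmware
 * supports it (range request command version 13 and above).
 */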
static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
						TOF_RANGE_REQ_CMD,
						IWL_FW_CMD_VER_UNKNOWN);

		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |=		\
				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
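
/*
 * Fill the AP entry fields shared by all new-API entry versions: addressing,
 * burst parameters and the initiator_ap_flags bitmap.
 */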
static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	target->initiator_ap_flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_PUT_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_PUT_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_PUT_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_PUT_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_PUT_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_PUT_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_PUT_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_PUT_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_PUT_FLAG(LMR_FEEDBACK);
}

static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 have some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 3 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	if (vif->bss_conf.assoc &&
	    !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;

		rcu_read_lock();

		/* guard against a missing/error entry, as done above */
		sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
		if (!IS_ERR_OR_NULL(sta) && sta->mfp &&
		    (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
			FTM_PUT_FLAG(PMF);

		rcu_read_unlock();

		target->sta_id = mvmvif->ap_sta_id;
	} else {
		target->sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}
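
/* Send the range request and translate the firmware status into an errno */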
527 */ 528 target->beacon_interval = cpu_to_le16(100); 529 return 0; 530 } 531 532 static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd) 533 { 534 u32 status; 535 int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status); 536 537 if (!err && status) { 538 IWL_ERR(mvm, "FTM range request command failure, status: %u\n", 539 status); 540 err = iwl_ftm_range_request_status_to_err(status); 541 } 542 543 return err; 544 } 545 546 static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 547 struct cfg80211_pmsr_request *req) 548 { 549 struct iwl_tof_range_req_cmd_v5 cmd_v5; 550 struct iwl_host_cmd hcmd = { 551 .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), 552 .dataflags[0] = IWL_HCMD_DFL_DUP, 553 .data[0] = &cmd_v5, 554 .len[0] = sizeof(cmd_v5), 555 }; 556 u8 i; 557 int err; 558 559 iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req); 560 561 for (i = 0; i < cmd_v5.num_of_ap; i++) { 562 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 563 564 err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]); 565 if (err) 566 return err; 567 } 568 569 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 570 } 571 572 static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 573 struct cfg80211_pmsr_request *req) 574 { 575 struct iwl_tof_range_req_cmd_v7 cmd_v7; 576 struct iwl_host_cmd hcmd = { 577 .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), 578 .dataflags[0] = IWL_HCMD_DFL_DUP, 579 .data[0] = &cmd_v7, 580 .len[0] = sizeof(cmd_v7), 581 }; 582 u8 i; 583 int err; 584 585 /* 586 * Versions 7 and 8 has the same structure except from the responders 587 * list, so iwl_mvm_ftm_cmd() can be used for version 7 too. 588 */ 589 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req); 590 591 for (i = 0; i < cmd_v7.num_of_ap; i++) { 592 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 593 594 err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]); 595 if (err) 596 return err; 597 } 598 599 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 600 } 601 602 static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 603 struct cfg80211_pmsr_request *req) 604 { 605 struct iwl_tof_range_req_cmd_v8 cmd; 606 struct iwl_host_cmd hcmd = { 607 .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), 608 .dataflags[0] = IWL_HCMD_DFL_DUP, 609 .data[0] = &cmd, 610 .len[0] = sizeof(cmd), 611 }; 612 u8 i; 613 int err; 614 615 iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req); 616 617 for (i = 0; i < cmd.num_of_ap; i++) { 618 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 619 620 err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]); 621 if (err) 622 return err; 623 } 624 625 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 626 } 627 628 static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 629 struct cfg80211_pmsr_request *req) 630 { 631 struct iwl_tof_range_req_cmd_v9 cmd; 632 struct iwl_host_cmd hcmd = { 633 .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), 634 .dataflags[0] = IWL_HCMD_DFL_DUP, 635 .data[0] = &cmd, 636 .len[0] = sizeof(cmd), 637 }; 638 u8 i; 639 int err; 640 641 iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req); 642 643 for (i = 0; i < cmd.num_of_ap; i++) { 644 struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; 645 struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i]; 646 647 err = iwl_mvm_ftm_put_target(mvm, vif, peer, target); 648 if (err) 649 return err; 650 } 651 652 return iwl_mvm_ftm_send_cmd(mvm, &hcmd); 653 } 654 655 static void iter(struct ieee80211_hw *hw, 656 struct 
static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_tof_range_req_ap_entry_v6 *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	if (WARN_ON(key->keylen > sizeof(target->tk)))
		return;

	memcpy(target->tk, key->key, key->keylen);
	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct iwl_tof_range_req_ap_entry_v7 *target)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
	u32 flags = le32_to_cpu(target->initiator_ap_flags);

	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
		       IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
			continue;

		target->cipher = entry->cipher;
		memcpy(target->hltk, entry->hltk, sizeof(target->hltk));

		if (vif->bss_conf.assoc &&
		    !memcmp(vif->bss_conf.bssid, target->bssid,
			    sizeof(target->bssid)))
			ieee80211_iter_keys(mvm->hw, vif, iter, target);
		else
			memcpy(target->tk, entry->tk, sizeof(target->tk));

		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));

		target->initiator_ap_flags |=
			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
		return;
	}
}

static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);

	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
	return err;
}

static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
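
/*
 * Set the NDP ranging parameters (max NDP repetitions, spatial streams and
 * total LTFs for both the R2I and I2R directions) from the compile-time
 * configuration.
 */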
static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
	    !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
		flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
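
/*
 * Start an FTM measurement request: select the range request command version
 * advertised by the firmware and build and send the matching command. Only
 * one request may be in flight at a time.
 */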
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
						   TOF_RANGE_REQ_CMD,
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
						 LOCATION_GROUP, 0),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len = entry->lci_len;
			res->ftm.lci = entry->buf;
		}

		if (entry->civic_len) {
			res->ftm.civicloc_len = entry->civic_len;
			res->ftm.civicloc = entry->buf + entry->lci_len;
		}

		/* we found the entry we needed */
		break;
	}
}

static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
					u8 num_of_aps)
{
	lockdep_assert_held(&mvm->mutex);

	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
			request_id, (u8)mvm->ftm_initiator.req->cookie);
		return -EINVAL;
	}

	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
		IWL_ERR(mvm, "FTM range response invalid\n");
		return -EINVAL;
	}

	return 0;
}
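
/*
 * Smooth the reported RTT using a per-responder exponential moving average.
 * The tracked average is always updated, but the reported rtt_avg is only
 * replaced when the new sample undershoots or overshoots the average by more
 * than the configured thresholds.
 */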
static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_smooth_entry *resp;
	s64 rtt_avg, rtt = res->ftm.rtt_avg;
	u32 undershoot, overshoot;
	u8 alpha;
	bool found;

	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
		return;

	WARN_ON(rtt < 0);

	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
		IWL_DEBUG_INFO(mvm,
			       ": %pM: ignore failed measurement. Status=%u\n",
			       res->addr, res->status);
		return;
	}

	found = false;
	list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) {
		if (!memcmp(res->addr, resp->addr, ETH_ALEN)) {
			found = true;
			break;
		}
	}

	if (!found) {
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return;

		memcpy(resp->addr, res->addr, ETH_ALEN);
		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);

		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	if (res->host_time - resp->host_time >
	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	/* Smooth the results based on the tracked RTT average */
	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;

	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);

	IWL_DEBUG_INFO(mvm,
		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
		       resp->addr, resp->rtt_avg, rtt_avg, rtt);

	/*
	 * update the responder's average RTT results regardless of
	 * the under/over shoot logic below
	 */
	resp->rtt_avg = rtt_avg;

	/* smooth the results */
	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
		res->ftm.rtt_avg = rtt_avg;

		IWL_DEBUG_INFO(mvm,
			       "undershoot: val=%lld\n",
			       (rtt_avg - rtt));
	} else if (rtt_avg < rtt && (rtt - rtt_avg) > overshoot) {
		res->ftm.rtt_avg = rtt_avg;
		IWL_DEBUG_INFO(mvm,
			       "overshoot: val=%lld\n",
			       (rtt - rtt_avg));
	}

update_time:
	resp->host_time = res->host_time;
}

static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}
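
/*
 * Store the RX/TX packet numbers reported by the firmware for a secured
 * (PASN) responder, so that the next request continues with up-to-date PNs.
 */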
static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
{
	struct iwl_mvm_ftm_pasn_entry *entry;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
			continue;

		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
		return;
	}
}

static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
{
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
		return 5;

	/* Starting from version 8, the FW advertises the version */
	if (mvm->cmd_ver.range_resp >= 8)
		return mvm->cmd_ver.range_resp;
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
		return 7;

	/* The first version of the new range request API */
	return 6;
}

static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}
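
/*
 * Handle a range response notification: validate it against the active
 * request, convert each AP entry into a cfg80211_pmsr_result (attaching
 * LCI/civic data and applying RTT smoothing) and report it, completing the
 * request when the firmware marks the last report in the batch.
 */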
void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}
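
/*
 * Handle an FTM LCI/civic notification: parse the measurement report
 * elements out of the action frame and cache them per BSSID so they can be
 * attached to the matching range response results.
 */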
void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
			 sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}