/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 - 2021 Intel Corporation
 */
#ifndef __PMSR_H
#define __PMSR_H
#include <net/cfg80211.h>
#include "core.h"
#include "nl80211.h"
#include "rdev-ops.h"

static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
			  struct nlattr *ftmreq,
			  struct cfg80211_pmsr_request_peer *out,
			  struct genl_info *info)
{
	const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
	struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
	u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */

	/* validate existing data */
	if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) {
		NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
		return -EINVAL;
	}

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq,
				    NULL, NULL);

	if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
		preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);

	/* set up values - struct is 0-initialized */
	out->ftm.requested = true;

	switch (out->chandef.chan->band) {
	case NL80211_BAND_60GHZ:
		/* optional */
		break;
	default:
		if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
			NL_SET_ERR_MSG(info->extack,
				       "FTM: must specify preamble");
			return -EINVAL;
		}
	}

	if (!(capa->ftm.preambles & BIT(preamble))) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
				    "FTM: invalid preamble");
		return -EINVAL;
	}

	out->ftm.preamble = preamble;

	out->ftm.burst_period = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
		out->ftm.burst_period =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);

	out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
	if (out->ftm.asap && !capa->ftm.asap) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
				    "FTM: ASAP mode not supported");
		return -EINVAL;
	}

	if (!out->ftm.asap && !capa->ftm.non_asap) {
		NL_SET_ERR_MSG(info->extack,
			       "FTM: non-ASAP mode not supported");
		return -EINVAL;
	}

	out->ftm.num_bursts_exp = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
		out->ftm.num_bursts_exp =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);

	if (capa->ftm.max_bursts_exponent >= 0 &&
	    out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
				    "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
		return -EINVAL;
	}

	out->ftm.burst_duration = 15;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
		out->ftm.burst_duration =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);

	out->ftm.ftms_per_burst = 0;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
		out->ftm.ftms_per_burst =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);

	if (capa->ftm.max_ftms_per_burst &&
	    (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
	     out->ftm.ftms_per_burst == 0)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
				    "FTM: FTMs per burst must be set lower than the device limit but non-zero");
		return -EINVAL;
	}

	out->ftm.ftmr_retries = 3;
	if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
		out->ftm.ftmr_retries =
			nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);

	out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
	if (out->ftm.request_lci && !capa->ftm.request_lci) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
				    "FTM: LCI request not supported");
	}

	out->ftm.request_civicloc =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
	if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
				    "FTM: civic location request not supported");
	}

	out->ftm.trigger_based =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED];
	if (out->ftm.trigger_based && !capa->ftm.trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED],
				    "FTM: trigger based ranging is not supported");
		return -EINVAL;
	}

	out->ftm.non_trigger_based =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED];
	if (out->ftm.non_trigger_based && !capa->ftm.non_trigger_based) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED],
				    "FTM: non-trigger-based ranging is not supported");
		return -EINVAL;
	}

	if (out->ftm.trigger_based && out->ftm.non_trigger_based) {
		NL_SET_ERR_MSG(info->extack,
			       "FTM: can't set both trigger based and non trigger based");
		return -EINVAL;
	}

	if ((out->ftm.trigger_based || out->ftm.non_trigger_based) &&
	    out->ftm.preamble != NL80211_PREAMBLE_HE) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
				    "FTM: non EDCA based ranging must use HE preamble");
		return -EINVAL;
	}

	out->ftm.lmr_feedback =
		!!tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK];
	if (!out->ftm.trigger_based && !out->ftm.non_trigger_based &&
	    out->ftm.lmr_feedback) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK],
				    "FTM: LMR feedback set for EDCA based ranging");
		return -EINVAL;
	}

	return 0;
}
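
/*
 * All of the capability checks in pmsr_parse_ftm() are driven by the
 * cfg80211_pmsr_capabilities structure the driver attaches to its wiphy.
 * As an illustrative sketch only (the values below are hypothetical and
 * not taken from any real driver), a driver that wants such FTM requests
 * to be accepted would advertise something along these lines before
 * wiphy_register():
 *
 *	static const struct cfg80211_pmsr_capabilities foo_pmsr_capa = {
 *		.max_peers = 10,
 *		.report_ap_tsf = 1,
 *		.randomize_mac_addr = 1,
 *		.ftm = {
 *			.supported = 1,
 *			.asap = 1,
 *			.non_asap = 1,
 *			.request_lci = 1,
 *			.request_civicloc = 1,
 *			.max_bursts_exponent = -1,
 *			.max_ftms_per_burst = 0,
 *			.bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
 *				      BIT(NL80211_CHAN_WIDTH_40) |
 *				      BIT(NL80211_CHAN_WIDTH_80),
 *			.preambles = BIT(NL80211_PREAMBLE_LEGACY) |
 *				     BIT(NL80211_PREAMBLE_HT) |
 *				     BIT(NL80211_PREAMBLE_VHT),
 *		},
 *	};
 *
 *	wiphy->pmsr_capa = &foo_pmsr_capa;
 *
 * With max_bursts_exponent negative and max_ftms_per_burst zero, the
 * corresponding limit checks above are skipped entirely.
 */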

static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
			   struct nlattr *peer,
			   struct cfg80211_pmsr_request_peer *out,
			   struct genl_info *info)
{
	struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
	struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
	struct nlattr *treq;
	int err, rem;

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
				    NULL, NULL);

	if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
	    !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
	    !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
		NL_SET_ERR_MSG_ATTR(info->extack, peer,
				    "insufficient peer data");
		return -EINVAL;
	}

	memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);

	/* reuse info->attrs */
	memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
	err = nla_parse_nested_deprecated(info->attrs, NL80211_ATTR_MAX,
					  tb[NL80211_PMSR_PEER_ATTR_CHAN],
					  NULL, info->extack);
	if (err)
		return err;

	err = nl80211_parse_chandef(rdev, info, &out->chandef);
	if (err)
		return err;

	/* no validation needed - was already done via nested policy */
	nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX,
				    tb[NL80211_PMSR_PEER_ATTR_REQ], NULL,
				    NULL);

	if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[NL80211_PMSR_PEER_ATTR_REQ],
				    "missing request type/data");
		return -EINVAL;
	}

	if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
		out->report_ap_tsf = true;

	if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
				    "reporting AP TSF is not supported");
		return -EINVAL;
	}

	nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
		switch (nla_type(treq)) {
		case NL80211_PMSR_TYPE_FTM:
			err = pmsr_parse_ftm(rdev, treq, out, info);
			break;
		default:
			NL_SET_ERR_MSG_ATTR(info->extack, treq,
					    "unsupported measurement type");
			err = -EINVAL;
		}
	}

	if (err)
		return err;

	return 0;
}
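
/*
 * For reference, the request layout walked by nl80211_pmsr_start() and
 * pmsr_parse_peer() looks like this (as implied by the parsing code in
 * this file, not an exhaustive description of the userspace API):
 *
 *	NL80211_ATTR_PEER_MEASUREMENTS
 *	  NL80211_PMSR_ATTR_PEERS
 *	    peer 1
 *	      NL80211_PMSR_PEER_ATTR_ADDR	(MAC address)
 *	      NL80211_PMSR_PEER_ATTR_CHAN	(nested chandef attributes)
 *	      NL80211_PMSR_PEER_ATTR_REQ
 *	        NL80211_PMSR_REQ_ATTR_GET_AP_TSF	(optional flag)
 *	        NL80211_PMSR_REQ_ATTR_DATA
 *	          NL80211_PMSR_TYPE_FTM		-> pmsr_parse_ftm()
 *	    peer 2
 *	      ...
 */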

int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev = info->user_ptr[1];
	struct cfg80211_pmsr_request *req;
	struct nlattr *peers, *peer;
	int count, rem, err, idx;

	if (!rdev->wiphy.pmsr_capa)
		return -EOPNOTSUPP;

	if (!reqattr)
		return -EINVAL;

	peers = nla_find(nla_data(reqattr), nla_len(reqattr),
			 NL80211_PMSR_ATTR_PEERS);
	if (!peers)
		return -EINVAL;

	count = 0;
	nla_for_each_nested(peer, peers, rem) {
		count++;

		if (count > rdev->wiphy.pmsr_capa->max_peers) {
			NL_SET_ERR_MSG_ATTR(info->extack, peer,
					    "Too many peers used");
			return -EINVAL;
		}
	}

	req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	if (info->attrs[NL80211_ATTR_TIMEOUT])
		req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);

	if (info->attrs[NL80211_ATTR_MAC]) {
		if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
			NL_SET_ERR_MSG_ATTR(info->extack,
					    info->attrs[NL80211_ATTR_MAC],
					    "device cannot randomize MAC address");
			err = -EINVAL;
			goto out_err;
		}

		err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
					       req->mac_addr_mask);
		if (err)
			goto out_err;
	} else {
		memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
		eth_broadcast_addr(req->mac_addr_mask);
	}

	idx = 0;
	nla_for_each_nested(peer, peers, rem) {
		/* NB: this reuses info->attrs, but we no longer need it */
		err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
		if (err)
			goto out_err;
		idx++;
	}

	req->n_peers = count;
	req->cookie = cfg80211_assign_cookie(rdev);
	req->nl_portid = info->snd_portid;

	err = rdev_start_pmsr(rdev, wdev, req);
	if (err)
		goto out_err;

	list_add_tail(&req->list, &wdev->pmsr_list);

	nl_set_extack_cookie_u64(info->extack, req->cookie);
	return 0;
out_err:
	kfree(req);
	return err;
}
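
/*
 * Request lifetime, as implemented below: nl80211_pmsr_start() above
 * allocates the request and queues it on wdev->pmsr_list once the driver
 * accepts it.  The request is freed either by cfg80211_pmsr_complete()
 * (normal completion reported by the driver), or by
 * cfg80211_pmsr_process_abort() after its netlink portid was cleared,
 * which happens when the requesting socket goes away
 * (cfg80211_release_pmsr()) or the wdev goes down
 * (cfg80211_pmsr_wdev_down()).  wdev->pmsr_lock protects the list itself;
 * the abort path additionally runs under the wdev mutex.
 */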

void cfg80211_pmsr_complete(struct wireless_dev *wdev,
			    struct cfg80211_pmsr_request *req,
			    gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
	struct sk_buff *msg;
	void *hdr;

	trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		goto free_request;

	hdr = nl80211hdr_put(msg, 0, 0, 0,
			     NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
	if (!hdr)
		goto free_msg;

	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
			      NL80211_ATTR_PAD))
		goto free_msg;

	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
			      NL80211_ATTR_PAD))
		goto free_msg;

	genlmsg_end(msg, hdr);
	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
	goto free_request;
free_msg:
	nlmsg_free(msg);
free_request:
	spin_lock_bh(&wdev->pmsr_lock);
	/*
	 * cfg80211_pmsr_process_abort() may have already moved this request
	 * to the free list, and will free it later. In this case, don't free
	 * it here.
	 */
	list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
		if (tmp == req) {
			list_del(&req->list);
			to_free = req;
			break;
		}
	}
	spin_unlock_bh(&wdev->pmsr_lock);
	kfree(to_free);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);

static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
				     struct cfg80211_pmsr_result *res)
{
	if (res->status == NL80211_PMSR_STATUS_FAILURE) {
		if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
				res->ftm.failure_reason))
			goto error;

		if (res->ftm.failure_reason ==
			NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
		    res->ftm.busy_retry_time &&
		    nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
				res->ftm.busy_retry_time))
			goto error;

		return 0;
	}

#define PUT(tp, attr, val)						\
	do {								\
		if (nla_put_##tp(msg,					\
				 NL80211_PMSR_FTM_RESP_ATTR_##attr,	\
				 res->ftm.val))				\
			goto error;					\
	} while (0)

#define PUTOPT(tp, attr, val)						\
	do {								\
		if (res->ftm.val##_valid)				\
			PUT(tp, attr, val);				\
	} while (0)

#define PUT_U64(attr, val)						\
	do {								\
		if (nla_put_u64_64bit(msg,				\
				      NL80211_PMSR_FTM_RESP_ATTR_##attr,\
				      res->ftm.val,			\
				      NL80211_PMSR_FTM_RESP_ATTR_PAD))	\
			goto error;					\
	} while (0)

#define PUTOPT_U64(attr, val)						\
	do {								\
		if (res->ftm.val##_valid)				\
			PUT_U64(attr, val);				\
	} while (0)

	if (res->ftm.burst_index >= 0)
		PUT(u32, BURST_INDEX, burst_index);
	PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
	PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
	PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
	PUT(u8, BURST_DURATION, burst_duration);
	PUT(u8, FTMS_PER_BURST, ftms_per_burst);
	PUTOPT(s32, RSSI_AVG, rssi_avg);
	PUTOPT(s32, RSSI_SPREAD, rssi_spread);
	if (res->ftm.tx_rate_valid &&
	    !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
				  NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
		goto error;
	if (res->ftm.rx_rate_valid &&
	    !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
				  NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
		goto error;
	PUTOPT_U64(RTT_AVG, rtt_avg);
	PUTOPT_U64(RTT_VARIANCE, rtt_variance);
	PUTOPT_U64(RTT_SPREAD, rtt_spread);
	PUTOPT_U64(DIST_AVG, dist_avg);
	PUTOPT_U64(DIST_VARIANCE, dist_variance);
	PUTOPT_U64(DIST_SPREAD, dist_spread);
	if (res->ftm.lci && res->ftm.lci_len &&
	    nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
		    res->ftm.lci_len, res->ftm.lci))
		goto error;
	if (res->ftm.civicloc && res->ftm.civicloc_len &&
	    nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
		    res->ftm.civicloc_len, res->ftm.civicloc))
		goto error;
#undef PUT
#undef PUTOPT
#undef PUT_U64
#undef PUTOPT_U64

	return 0;
error:
	return -ENOSPC;
}

static int nl80211_pmsr_send_result(struct sk_buff *msg,
				    struct cfg80211_pmsr_result *res)
{
	struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;

	pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS);
	if (!pmsr)
		goto error;

	peers = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_PEERS);
	if (!peers)
		goto error;

	peer = nla_nest_start_noflag(msg, 1);
	if (!peer)
		goto error;

	if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
		goto error;

	resp = nla_nest_start_noflag(msg, NL80211_PMSR_PEER_ATTR_RESP);
	if (!resp)
		goto error;

	if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
	    nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
			      res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
		goto error;

	if (res->ap_tsf_valid &&
	    nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
			      res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
		goto error;

	if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
		goto error;

	data = nla_nest_start_noflag(msg, NL80211_PMSR_RESP_ATTR_DATA);
	if (!data)
		goto error;

	typedata = nla_nest_start_noflag(msg, res->type);
	if (!typedata)
		goto error;

	switch (res->type) {
	case NL80211_PMSR_TYPE_FTM:
		if (nl80211_pmsr_send_ftm_res(msg, res))
			goto error;
		break;
	default:
		WARN_ON(1);
	}

	nla_nest_end(msg, typedata);
	nla_nest_end(msg, data);
	nla_nest_end(msg, resp);
	nla_nest_end(msg, peer);
	nla_nest_end(msg, peers);
	nla_nest_end(msg, pmsr);

	return 0;
error:
	return -ENOSPC;
}
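
/*
 * The reply built by nl80211_pmsr_send_result() above mirrors the request
 * layout: NL80211_ATTR_PEER_MEASUREMENTS wraps NL80211_PMSR_ATTR_PEERS,
 * each peer nest carries NL80211_PMSR_PEER_ATTR_ADDR and
 * NL80211_PMSR_PEER_ATTR_RESP, and the response nest holds the status,
 * host time, optional AP TSF and FINAL flag, plus the per-type data
 * (currently only NL80211_PMSR_TYPE_FTM, via nl80211_pmsr_send_ftm_res()).
 */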

void cfg80211_pmsr_report(struct wireless_dev *wdev,
			  struct cfg80211_pmsr_request *req,
			  struct cfg80211_pmsr_result *result,
			  gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct sk_buff *msg;
	void *hdr;
	int err;

	trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
				   result->addr);

	/*
	 * Currently, only variable items are LCI and civic location,
	 * both of which are reasonably short so we don't need to
	 * worry about them here for the allocation.
	 */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
	if (!msg)
		return;

	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
	if (!hdr)
		goto free;

	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
	    nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
			      NL80211_ATTR_PAD))
		goto free;

	if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
			      NL80211_ATTR_PAD))
		goto free;

	err = nl80211_pmsr_send_result(msg, result);
	if (err) {
		pr_err_ratelimited("peer measurement result: message didn't fit!");
		goto free;
	}

	genlmsg_end(msg, hdr);
	genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
	return;
free:
	nlmsg_free(msg);
}
EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);

static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev)
{
	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
	struct cfg80211_pmsr_request *req, *tmp;
	LIST_HEAD(free_list);

	lockdep_assert_held(&wdev->mtx);

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
		if (req->nl_portid)
			continue;
		list_move_tail(&req->list, &free_list);
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	list_for_each_entry_safe(req, tmp, &free_list, list) {
		rdev_abort_pmsr(rdev, wdev, req);

		kfree(req);
	}
}

void cfg80211_pmsr_free_wk(struct work_struct *work)
{
	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
						 pmsr_free_wk);

	wdev_lock(wdev);
	cfg80211_pmsr_process_abort(wdev);
	wdev_unlock(wdev);
}

void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
{
	struct cfg80211_pmsr_request *req;
	bool found = false;

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry(req, &wdev->pmsr_list, list) {
		found = true;
		req->nl_portid = 0;
	}
	spin_unlock_bh(&wdev->pmsr_lock);

	if (found)
		cfg80211_pmsr_process_abort(wdev);

	WARN_ON(!list_empty(&wdev->pmsr_list));
}

void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
{
	struct cfg80211_pmsr_request *req;

	spin_lock_bh(&wdev->pmsr_lock);
	list_for_each_entry(req, &wdev->pmsr_list, list) {
		if (req->nl_portid == portid) {
			req->nl_portid = 0;
			schedule_work(&wdev->pmsr_free_wk);
		}
	}
	spin_unlock_bh(&wdev->pmsr_lock);
}

#endif /* __PMSR_H */