1 /* 2 * Copyright 2002-2005, Instant802 Networks, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc. 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2013-2014 Intel Mobile Communications GmbH 7 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 8 * Copyright (C) 2018-2019 Intel Corporation 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 */ 14 15 #include <linux/jiffies.h> 16 #include <linux/slab.h> 17 #include <linux/kernel.h> 18 #include <linux/skbuff.h> 19 #include <linux/netdevice.h> 20 #include <linux/etherdevice.h> 21 #include <linux/rcupdate.h> 22 #include <linux/export.h> 23 #include <linux/bitops.h> 24 #include <net/mac80211.h> 25 #include <net/ieee80211_radiotap.h> 26 #include <asm/unaligned.h> 27 28 #include "ieee80211_i.h" 29 #include "driver-ops.h" 30 #include "led.h" 31 #include "mesh.h" 32 #include "wep.h" 33 #include "wpa.h" 34 #include "tkip.h" 35 #include "wme.h" 36 #include "rate.h" 37 38 static inline void ieee80211_rx_stats(struct net_device *dev, u32 len) 39 { 40 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 41 42 u64_stats_update_begin(&tstats->syncp); 43 tstats->rx_packets++; 44 tstats->rx_bytes += len; 45 u64_stats_update_end(&tstats->syncp); 46 } 47 48 static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 49 enum nl80211_iftype type) 50 { 51 __le16 fc = hdr->frame_control; 52 53 if (ieee80211_is_data(fc)) { 54 if (len < 24) /* drop incorrect hdr len (data) */ 55 return NULL; 56 57 if (ieee80211_has_a4(fc)) 58 return NULL; 59 if (ieee80211_has_tods(fc)) 60 return hdr->addr1; 61 if (ieee80211_has_fromds(fc)) 62 return hdr->addr2; 63 64 return hdr->addr3; 65 } 66 67 if (ieee80211_is_mgmt(fc)) { 68 if (len < 24) /* drop incorrect hdr len (mgmt) */ 69 return NULL; 70 return hdr->addr3; 71 } 72 73 if (ieee80211_is_ctl(fc)) { 74 if (ieee80211_is_pspoll(fc)) 75 return hdr->addr1; 76 77 if (ieee80211_is_back_req(fc)) { 78 switch (type) { 79 case NL80211_IFTYPE_STATION: 80 return hdr->addr2; 81 case NL80211_IFTYPE_AP: 82 case NL80211_IFTYPE_AP_VLAN: 83 return hdr->addr1; 84 default: 85 break; /* fall through to the return */ 86 } 87 } 88 } 89 90 return NULL; 91 } 92 93 /* 94 * monitor mode reception 95 * 96 * This function cleans up the SKB, i.e. it removes all the stuff 97 * only useful for monitoring. 
98 */ 99 static void remove_monitor_info(struct sk_buff *skb, 100 unsigned int present_fcs_len, 101 unsigned int rtap_space) 102 { 103 if (present_fcs_len) 104 __pskb_trim(skb, skb->len - present_fcs_len); 105 __pskb_pull(skb, rtap_space); 106 } 107 108 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, 109 unsigned int rtap_space) 110 { 111 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 112 struct ieee80211_hdr *hdr; 113 114 hdr = (void *)(skb->data + rtap_space); 115 116 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 117 RX_FLAG_FAILED_PLCP_CRC | 118 RX_FLAG_ONLY_MONITOR | 119 RX_FLAG_NO_PSDU)) 120 return true; 121 122 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) 123 return true; 124 125 if (ieee80211_is_ctl(hdr->frame_control) && 126 !ieee80211_is_pspoll(hdr->frame_control) && 127 !ieee80211_is_back_req(hdr->frame_control)) 128 return true; 129 130 return false; 131 } 132 133 static int 134 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, 135 struct ieee80211_rx_status *status, 136 struct sk_buff *skb) 137 { 138 int len; 139 140 /* always present fields */ 141 len = sizeof(struct ieee80211_radiotap_header) + 8; 142 143 /* allocate extra bitmaps */ 144 if (status->chains) 145 len += 4 * hweight8(status->chains); 146 /* vendor presence bitmap */ 147 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) 148 len += 4; 149 150 if (ieee80211_have_rx_timestamp(status)) { 151 len = ALIGN(len, 8); 152 len += 8; 153 } 154 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) 155 len += 1; 156 157 /* antenna field, if we don't have per-chain info */ 158 if (!status->chains) 159 len += 1; 160 161 /* padding for RX_FLAGS if necessary */ 162 len = ALIGN(len, 2); 163 164 if (status->encoding == RX_ENC_HT) /* HT info */ 165 len += 3; 166 167 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 168 len = ALIGN(len, 4); 169 len += 8; 170 } 171 172 if (status->encoding == RX_ENC_VHT) { 173 len = ALIGN(len, 2); 174 len += 12; 175 } 176 177 if (local->hw.radiotap_timestamp.units_pos >= 0) { 178 len = ALIGN(len, 8); 179 len += 12; 180 } 181 182 if (status->encoding == RX_ENC_HE && 183 status->flag & RX_FLAG_RADIOTAP_HE) { 184 len = ALIGN(len, 2); 185 len += 12; 186 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); 187 } 188 189 if (status->encoding == RX_ENC_HE && 190 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 191 len = ALIGN(len, 2); 192 len += 12; 193 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); 194 } 195 196 if (status->flag & RX_FLAG_NO_PSDU) 197 len += 1; 198 199 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 200 len = ALIGN(len, 2); 201 len += 4; 202 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); 203 } 204 205 if (status->chains) { 206 /* antenna and antenna signal fields */ 207 len += 2 * hweight8(status->chains); 208 } 209 210 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 211 struct ieee80211_vendor_radiotap *rtap; 212 int vendor_data_offset = 0; 213 214 /* 215 * The position to look at depends on the existence (or non- 216 * existence) of other elements, so take that into account... 
217 */ 218 if (status->flag & RX_FLAG_RADIOTAP_HE) 219 vendor_data_offset += 220 sizeof(struct ieee80211_radiotap_he); 221 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 222 vendor_data_offset += 223 sizeof(struct ieee80211_radiotap_he_mu); 224 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 225 vendor_data_offset += 226 sizeof(struct ieee80211_radiotap_lsig); 227 228 rtap = (void *)&skb->data[vendor_data_offset]; 229 230 /* alignment for fixed 6-byte vendor data header */ 231 len = ALIGN(len, 2); 232 /* vendor data header */ 233 len += 6; 234 if (WARN_ON(rtap->align == 0)) 235 rtap->align = 1; 236 len = ALIGN(len, rtap->align); 237 len += rtap->len + rtap->pad; 238 } 239 240 return len; 241 } 242 243 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 244 struct sk_buff *skb, 245 int rtap_space) 246 { 247 struct { 248 struct ieee80211_hdr_3addr hdr; 249 u8 category; 250 u8 action_code; 251 } __packed __aligned(2) action; 252 253 if (!sdata) 254 return; 255 256 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 257 258 if (skb->len < rtap_space + sizeof(action) + 259 VHT_MUMIMO_GROUPS_DATA_LEN) 260 return; 261 262 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 263 return; 264 265 skb_copy_bits(skb, rtap_space, &action, sizeof(action)); 266 267 if (!ieee80211_is_action(action.hdr.frame_control)) 268 return; 269 270 if (action.category != WLAN_CATEGORY_VHT) 271 return; 272 273 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 274 return; 275 276 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 277 return; 278 279 skb = skb_copy(skb, GFP_ATOMIC); 280 if (!skb) 281 return; 282 283 skb_queue_tail(&sdata->skb_queue, skb); 284 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 285 } 286 287 /* 288 * ieee80211_add_rx_radiotap_header - add radiotap header 289 * 290 * add a radiotap header containing all the fields which the hardware provided. 
291 */ 292 static void 293 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 294 struct sk_buff *skb, 295 struct ieee80211_rate *rate, 296 int rtap_len, bool has_fcs) 297 { 298 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 299 struct ieee80211_radiotap_header *rthdr; 300 unsigned char *pos; 301 __le32 *it_present; 302 u32 it_present_val; 303 u16 rx_flags = 0; 304 u16 channel_flags = 0; 305 int mpdulen, chain; 306 unsigned long chains = status->chains; 307 struct ieee80211_vendor_radiotap rtap = {}; 308 struct ieee80211_radiotap_he he = {}; 309 struct ieee80211_radiotap_he_mu he_mu = {}; 310 struct ieee80211_radiotap_lsig lsig = {}; 311 312 if (status->flag & RX_FLAG_RADIOTAP_HE) { 313 he = *(struct ieee80211_radiotap_he *)skb->data; 314 skb_pull(skb, sizeof(he)); 315 WARN_ON_ONCE(status->encoding != RX_ENC_HE); 316 } 317 318 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { 319 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; 320 skb_pull(skb, sizeof(he_mu)); 321 } 322 323 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 324 lsig = *(struct ieee80211_radiotap_lsig *)skb->data; 325 skb_pull(skb, sizeof(lsig)); 326 } 327 328 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 329 rtap = *(struct ieee80211_vendor_radiotap *)skb->data; 330 /* rtap.len and rtap.pad are undone immediately */ 331 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad); 332 } 333 334 mpdulen = skb->len; 335 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))) 336 mpdulen += FCS_LEN; 337 338 rthdr = skb_push(skb, rtap_len); 339 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad); 340 it_present = &rthdr->it_present; 341 342 /* radiotap header, set always present flags */ 343 rthdr->it_len = cpu_to_le16(rtap_len); 344 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | 345 BIT(IEEE80211_RADIOTAP_CHANNEL) | 346 BIT(IEEE80211_RADIOTAP_RX_FLAGS); 347 348 if (!status->chains) 349 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); 350 351 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 352 it_present_val |= 353 BIT(IEEE80211_RADIOTAP_EXT) | 354 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); 355 put_unaligned_le32(it_present_val, it_present); 356 it_present++; 357 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | 358 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 359 } 360 361 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 362 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | 363 BIT(IEEE80211_RADIOTAP_EXT); 364 put_unaligned_le32(it_present_val, it_present); 365 it_present++; 366 it_present_val = rtap.present; 367 } 368 369 put_unaligned_le32(it_present_val, it_present); 370 371 pos = (void *)(it_present + 1); 372 373 /* the order of the following fields is important */ 374 375 /* IEEE80211_RADIOTAP_TSFT */ 376 if (ieee80211_have_rx_timestamp(status)) { 377 /* padding */ 378 while ((pos - (u8 *)rthdr) & 7) 379 *pos++ = 0; 380 put_unaligned_le64( 381 ieee80211_calculate_rx_timestamp(local, status, 382 mpdulen, 0), 383 pos); 384 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 385 pos += 8; 386 } 387 388 /* IEEE80211_RADIOTAP_FLAGS */ 389 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) 390 *pos |= IEEE80211_RADIOTAP_F_FCS; 391 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 392 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 393 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) 394 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 395 pos++; 396 397 /* IEEE80211_RADIOTAP_RATE */ 398 if (!rate || status->encoding != RX_ENC_LEGACY) { 399 /* 400 * Without 
rate information don't add it. If we have, 401 * MCS information is a separate field in radiotap, 402 * added below. The byte here is needed as padding 403 * for the channel though, so initialise it to 0. 404 */ 405 *pos = 0; 406 } else { 407 int shift = 0; 408 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 409 if (status->bw == RATE_INFO_BW_10) 410 shift = 1; 411 else if (status->bw == RATE_INFO_BW_5) 412 shift = 2; 413 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); 414 } 415 pos++; 416 417 /* IEEE80211_RADIOTAP_CHANNEL */ 418 put_unaligned_le16(status->freq, pos); 419 pos += 2; 420 if (status->bw == RATE_INFO_BW_10) 421 channel_flags |= IEEE80211_CHAN_HALF; 422 else if (status->bw == RATE_INFO_BW_5) 423 channel_flags |= IEEE80211_CHAN_QUARTER; 424 425 if (status->band == NL80211_BAND_5GHZ) 426 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; 427 else if (status->encoding != RX_ENC_LEGACY) 428 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 429 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 430 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; 431 else if (rate) 432 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; 433 else 434 channel_flags |= IEEE80211_CHAN_2GHZ; 435 put_unaligned_le16(channel_flags, pos); 436 pos += 2; 437 438 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 439 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && 440 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 441 *pos = status->signal; 442 rthdr->it_present |= 443 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 444 pos++; 445 } 446 447 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 448 449 if (!status->chains) { 450 /* IEEE80211_RADIOTAP_ANTENNA */ 451 *pos = status->antenna; 452 pos++; 453 } 454 455 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 456 457 /* IEEE80211_RADIOTAP_RX_FLAGS */ 458 /* ensure 2 byte alignment for the 2 byte field as required */ 459 if ((pos - (u8 *)rthdr) & 1) 460 *pos++ = 0; 461 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 462 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 463 put_unaligned_le16(rx_flags, pos); 464 pos += 2; 465 466 if (status->encoding == RX_ENC_HT) { 467 unsigned int stbc; 468 469 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 470 *pos++ = local->hw.radiotap_mcs_details; 471 *pos = 0; 472 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 473 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 474 if (status->bw == RATE_INFO_BW_40) 475 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 476 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 477 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; 478 if (status->enc_flags & RX_ENC_FLAG_LDPC) 479 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; 480 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; 481 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; 482 pos++; 483 *pos++ = status->rate_idx; 484 } 485 486 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 487 u16 flags = 0; 488 489 /* ensure 4 byte alignment */ 490 while ((pos - (u8 *)rthdr) & 3) 491 pos++; 492 rthdr->it_present |= 493 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); 494 put_unaligned_le32(status->ampdu_reference, pos); 495 pos += 4; 496 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) 497 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; 498 if (status->flag & RX_FLAG_AMPDU_IS_LAST) 499 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; 500 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) 501 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; 502 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 503 flags |= 
IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; 504 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) 505 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; 506 if (status->flag & RX_FLAG_AMPDU_EOF_BIT) 507 flags |= IEEE80211_RADIOTAP_AMPDU_EOF; 508 put_unaligned_le16(flags, pos); 509 pos += 2; 510 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 511 *pos++ = status->ampdu_delimiter_crc; 512 else 513 *pos++ = 0; 514 *pos++ = 0; 515 } 516 517 if (status->encoding == RX_ENC_VHT) { 518 u16 known = local->hw.radiotap_vht_details; 519 520 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); 521 put_unaligned_le16(known, pos); 522 pos += 2; 523 /* flags */ 524 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 525 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; 526 /* in VHT, STBC is binary */ 527 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) 528 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 529 if (status->enc_flags & RX_ENC_FLAG_BF) 530 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; 531 pos++; 532 /* bandwidth */ 533 switch (status->bw) { 534 case RATE_INFO_BW_80: 535 *pos++ = 4; 536 break; 537 case RATE_INFO_BW_160: 538 *pos++ = 11; 539 break; 540 case RATE_INFO_BW_40: 541 *pos++ = 1; 542 break; 543 default: 544 *pos++ = 0; 545 } 546 /* MCS/NSS */ 547 *pos = (status->rate_idx << 4) | status->nss; 548 pos += 4; 549 /* coding field */ 550 if (status->enc_flags & RX_ENC_FLAG_LDPC) 551 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; 552 pos++; 553 /* group ID */ 554 pos++; 555 /* partial_aid */ 556 pos += 2; 557 } 558 559 if (local->hw.radiotap_timestamp.units_pos >= 0) { 560 u16 accuracy = 0; 561 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; 562 563 rthdr->it_present |= 564 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP); 565 566 /* ensure 8 byte alignment */ 567 while ((pos - (u8 *)rthdr) & 7) 568 pos++; 569 570 put_unaligned_le64(status->device_timestamp, pos); 571 pos += sizeof(u64); 572 573 if (local->hw.radiotap_timestamp.accuracy >= 0) { 574 accuracy = local->hw.radiotap_timestamp.accuracy; 575 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; 576 } 577 put_unaligned_le16(accuracy, pos); 578 pos += sizeof(u16); 579 580 *pos++ = local->hw.radiotap_timestamp.units_pos; 581 *pos++ = flags; 582 } 583 584 if (status->encoding == RX_ENC_HE && 585 status->flag & RX_FLAG_RADIOTAP_HE) { 586 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) 587 588 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { 589 he.data6 |= HE_PREP(DATA6_NSTS, 590 FIELD_GET(RX_ENC_FLAG_STBC_MASK, 591 status->enc_flags)); 592 he.data3 |= HE_PREP(DATA3_STBC, 1); 593 } else { 594 he.data6 |= HE_PREP(DATA6_NSTS, status->nss); 595 } 596 597 #define CHECK_GI(s) \ 598 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ 599 (int)NL80211_RATE_INFO_HE_GI_##s) 600 601 CHECK_GI(0_8); 602 CHECK_GI(1_6); 603 CHECK_GI(3_2); 604 605 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); 606 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); 607 he.data3 |= HE_PREP(DATA3_CODING, 608 !!(status->enc_flags & RX_ENC_FLAG_LDPC)); 609 610 he.data5 |= HE_PREP(DATA5_GI, status->he_gi); 611 612 switch (status->bw) { 613 case RATE_INFO_BW_20: 614 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 615 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); 616 break; 617 case RATE_INFO_BW_40: 618 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 619 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); 620 break; 621 case RATE_INFO_BW_80: 622 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 623 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); 624 break; 625 
case RATE_INFO_BW_160: 626 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 627 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); 628 break; 629 case RATE_INFO_BW_HE_RU: 630 #define CHECK_RU_ALLOC(s) \ 631 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ 632 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) 633 634 CHECK_RU_ALLOC(26); 635 CHECK_RU_ALLOC(52); 636 CHECK_RU_ALLOC(106); 637 CHECK_RU_ALLOC(242); 638 CHECK_RU_ALLOC(484); 639 CHECK_RU_ALLOC(996); 640 CHECK_RU_ALLOC(2x996); 641 642 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 643 status->he_ru + 4); 644 break; 645 default: 646 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); 647 } 648 649 /* ensure 2 byte alignment */ 650 while ((pos - (u8 *)rthdr) & 1) 651 pos++; 652 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); 653 memcpy(pos, &he, sizeof(he)); 654 pos += sizeof(he); 655 } 656 657 if (status->encoding == RX_ENC_HE && 658 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 659 /* ensure 2 byte alignment */ 660 while ((pos - (u8 *)rthdr) & 1) 661 pos++; 662 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); 663 memcpy(pos, &he_mu, sizeof(he_mu)); 664 pos += sizeof(he_mu); 665 } 666 667 if (status->flag & RX_FLAG_NO_PSDU) { 668 rthdr->it_present |= 669 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU); 670 *pos++ = status->zero_length_psdu_type; 671 } 672 673 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 674 /* ensure 2 byte alignment */ 675 while ((pos - (u8 *)rthdr) & 1) 676 pos++; 677 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG); 678 memcpy(pos, &lsig, sizeof(lsig)); 679 pos += sizeof(lsig); 680 } 681 682 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 683 *pos++ = status->chain_signal[chain]; 684 *pos++ = chain; 685 } 686 687 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 688 /* ensure 2 byte alignment for the vendor field as required */ 689 if ((pos - (u8 *)rthdr) & 1) 690 *pos++ = 0; 691 *pos++ = rtap.oui[0]; 692 *pos++ = rtap.oui[1]; 693 *pos++ = rtap.oui[2]; 694 *pos++ = rtap.subns; 695 put_unaligned_le16(rtap.len, pos); 696 pos += 2; 697 /* align the actual payload as requested */ 698 while ((pos - (u8 *)rthdr) & (rtap.align - 1)) 699 *pos++ = 0; 700 /* data (and possible padding) already follows */ 701 } 702 } 703 704 static struct sk_buff * 705 ieee80211_make_monitor_skb(struct ieee80211_local *local, 706 struct sk_buff **origskb, 707 struct ieee80211_rate *rate, 708 int rtap_space, bool use_origskb) 709 { 710 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); 711 int rt_hdrlen, needed_headroom; 712 struct sk_buff *skb; 713 714 /* room for the radiotap header based on driver features */ 715 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); 716 needed_headroom = rt_hdrlen - rtap_space; 717 718 if (use_origskb) { 719 /* only need to expand headroom if necessary */ 720 skb = *origskb; 721 *origskb = NULL; 722 723 /* 724 * This shouldn't trigger often because most devices have an 725 * RX header they pull before we get here, and that should 726 * be big enough for our radiotap information. We should 727 * probably export the length to drivers so that we can have 728 * them allocate enough headroom to start with. 729 */ 730 if (skb_headroom(skb) < needed_headroom && 731 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 732 dev_kfree_skb(skb); 733 return NULL; 734 } 735 } else { 736 /* 737 * Need to make a copy and possibly remove radiotap header 738 * and FCS from the original. 
739 */ 740 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); 741 742 if (!skb) 743 return NULL; 744 } 745 746 /* prepend radiotap information */ 747 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); 748 749 skb_reset_mac_header(skb); 750 skb->ip_summed = CHECKSUM_UNNECESSARY; 751 skb->pkt_type = PACKET_OTHERHOST; 752 skb->protocol = htons(ETH_P_802_2); 753 754 return skb; 755 } 756 757 /* 758 * This function copies a received frame to all monitor interfaces and 759 * returns a cleaned-up SKB that no longer includes the FCS nor the 760 * radiotap header the driver might have added. 761 */ 762 static struct sk_buff * 763 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 764 struct ieee80211_rate *rate) 765 { 766 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 767 struct ieee80211_sub_if_data *sdata; 768 struct sk_buff *monskb = NULL; 769 int present_fcs_len = 0; 770 unsigned int rtap_space = 0; 771 struct ieee80211_sub_if_data *monitor_sdata = 772 rcu_dereference(local->monitor_sdata); 773 bool only_monitor = false; 774 unsigned int min_head_len; 775 776 if (status->flag & RX_FLAG_RADIOTAP_HE) 777 rtap_space += sizeof(struct ieee80211_radiotap_he); 778 779 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 780 rtap_space += sizeof(struct ieee80211_radiotap_he_mu); 781 782 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 783 rtap_space += sizeof(struct ieee80211_radiotap_lsig); 784 785 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { 786 struct ieee80211_vendor_radiotap *rtap = 787 (void *)(origskb->data + rtap_space); 788 789 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad; 790 } 791 792 min_head_len = rtap_space; 793 794 /* 795 * First, we may need to make a copy of the skb because 796 * (1) we need to modify it for radiotap (if not present), and 797 * (2) the other RX handlers will modify the skb we got. 798 * 799 * We don't need to, of course, if we aren't going to return 800 * the SKB because it has a bad FCS/PLCP checksum. 
801 */ 802 803 if (!(status->flag & RX_FLAG_NO_PSDU)) { 804 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { 805 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { 806 /* driver bug */ 807 WARN_ON(1); 808 dev_kfree_skb(origskb); 809 return NULL; 810 } 811 present_fcs_len = FCS_LEN; 812 } 813 814 /* also consider the hdr->frame_control */ 815 min_head_len += 2; 816 } 817 818 /* ensure that the expected data elements are in skb head */ 819 if (!pskb_may_pull(origskb, min_head_len)) { 820 dev_kfree_skb(origskb); 821 return NULL; 822 } 823 824 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); 825 826 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { 827 if (only_monitor) { 828 dev_kfree_skb(origskb); 829 return NULL; 830 } 831 832 remove_monitor_info(origskb, present_fcs_len, rtap_space); 833 return origskb; 834 } 835 836 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); 837 838 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { 839 bool last_monitor = list_is_last(&sdata->u.mntr.list, 840 &local->mon_list); 841 842 if (!monskb) 843 monskb = ieee80211_make_monitor_skb(local, &origskb, 844 rate, rtap_space, 845 only_monitor && 846 last_monitor); 847 848 if (monskb) { 849 struct sk_buff *skb; 850 851 if (last_monitor) { 852 skb = monskb; 853 monskb = NULL; 854 } else { 855 skb = skb_clone(monskb, GFP_ATOMIC); 856 } 857 858 if (skb) { 859 skb->dev = sdata->dev; 860 ieee80211_rx_stats(skb->dev, skb->len); 861 netif_receive_skb(skb); 862 } 863 } 864 865 if (last_monitor) 866 break; 867 } 868 869 /* this happens if last_monitor was erroneously false */ 870 dev_kfree_skb(monskb); 871 872 /* ditto */ 873 if (!origskb) 874 return NULL; 875 876 remove_monitor_info(origskb, present_fcs_len, rtap_space); 877 return origskb; 878 } 879 880 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 881 { 882 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 883 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 884 int tid, seqno_idx, security_idx; 885 886 /* does the frame have a qos control field? */ 887 if (ieee80211_is_data_qos(hdr->frame_control)) { 888 u8 *qc = ieee80211_get_qos_ctl(hdr); 889 /* frame has qos control */ 890 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 891 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 892 status->rx_flags |= IEEE80211_RX_AMSDU; 893 894 seqno_idx = tid; 895 security_idx = tid; 896 } else { 897 /* 898 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 899 * 900 * Sequence numbers for management frames, QoS data 901 * frames with a broadcast/multicast address in the 902 * Address 1 field, and all non-QoS data frames sent 903 * by QoS STAs are assigned using an additional single 904 * modulo-4096 counter, [...] 905 * 906 * We also use that counter for non-QoS STAs. 907 */ 908 seqno_idx = IEEE80211_NUM_TIDS; 909 security_idx = 0; 910 if (ieee80211_is_mgmt(hdr->frame_control)) 911 security_idx = IEEE80211_NUM_TIDS; 912 tid = 0; 913 } 914 915 rx->seqno_idx = seqno_idx; 916 rx->security_idx = security_idx; 917 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 918 * For now, set skb->priority to 0 for other cases. */ 919 rx->skb->priority = (tid > 7) ? 0 : tid; 920 } 921 922 /** 923 * DOC: Packet alignment 924 * 925 * Drivers always need to pass packets that are aligned to two-byte boundaries 926 * to the stack. 
 *
 * Additionally, drivers should, if possible, align the payload data in a way that
 * guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must be two modulo
 * four, because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like the kind Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

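	/* walk the ring forward, releasing every slot before head_seq_num */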
lockdep_assert_held(&tid_agg_rx->reorder_lock); 1146 1147 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { 1148 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1149 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1150 frames); 1151 } 1152 } 1153 1154 /* 1155 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 1156 * the skb was added to the buffer longer than this time ago, the earlier 1157 * frames that have not yet been received are assumed to be lost and the skb 1158 * can be released for processing. This may also release other skb's from the 1159 * reorder buffer if there are no additional gaps between the frames. 1160 * 1161 * Callers must hold tid_agg_rx->reorder_lock. 1162 */ 1163 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 1164 1165 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 1166 struct tid_ampdu_rx *tid_agg_rx, 1167 struct sk_buff_head *frames) 1168 { 1169 int index, i, j; 1170 1171 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1172 1173 /* release the buffer until next missing frame */ 1174 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1175 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && 1176 tid_agg_rx->stored_mpdu_num) { 1177 /* 1178 * No buffers ready to be released, but check whether any 1179 * frames in the reorder buffer have timed out. 1180 */ 1181 int skipped = 1; 1182 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 1183 j = (j + 1) % tid_agg_rx->buf_size) { 1184 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { 1185 skipped++; 1186 continue; 1187 } 1188 if (skipped && 1189 !time_after(jiffies, tid_agg_rx->reorder_time[j] + 1190 HT_RX_REORDER_BUF_TIMEOUT)) 1191 goto set_release_timer; 1192 1193 /* don't leave incomplete A-MSDUs around */ 1194 for (i = (index + 1) % tid_agg_rx->buf_size; i != j; 1195 i = (i + 1) % tid_agg_rx->buf_size) 1196 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); 1197 1198 ht_dbg_ratelimited(sdata, 1199 "release an RX reorder frame due to timeout on earlier frames\n"); 1200 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, 1201 frames); 1202 1203 /* 1204 * Increment the head seq# also for the skipped slots. 1205 */ 1206 tid_agg_rx->head_seq_num = 1207 (tid_agg_rx->head_seq_num + 1208 skipped) & IEEE80211_SN_MASK; 1209 skipped = 0; 1210 } 1211 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1212 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1213 frames); 1214 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1215 } 1216 1217 if (tid_agg_rx->stored_mpdu_num) { 1218 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1219 1220 for (; j != (index - 1) % tid_agg_rx->buf_size; 1221 j = (j + 1) % tid_agg_rx->buf_size) { 1222 if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) 1223 break; 1224 } 1225 1226 set_release_timer: 1227 1228 if (!tid_agg_rx->removed) 1229 mod_timer(&tid_agg_rx->reorder_timer, 1230 tid_agg_rx->reorder_time[j] + 1 + 1231 HT_RX_REORDER_BUF_TIMEOUT); 1232 } else { 1233 del_timer(&tid_agg_rx->reorder_timer); 1234 } 1235 } 1236 1237 /* 1238 * As this function belongs to the RX path it must be under 1239 * rcu_read_lock protection. It returns false if the frame 1240 * can be processed immediately, true if it was consumed. 
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number, so pick
	 * one from the first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Returns
 * true if the MPDU was buffered, false if it should be processed.
1341 */ 1342 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 1343 struct sk_buff_head *frames) 1344 { 1345 struct sk_buff *skb = rx->skb; 1346 struct ieee80211_local *local = rx->local; 1347 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1348 struct sta_info *sta = rx->sta; 1349 struct tid_ampdu_rx *tid_agg_rx; 1350 u16 sc; 1351 u8 tid, ack_policy; 1352 1353 if (!ieee80211_is_data_qos(hdr->frame_control) || 1354 is_multicast_ether_addr(hdr->addr1)) 1355 goto dont_reorder; 1356 1357 /* 1358 * filter the QoS data rx stream according to 1359 * STA/TID and check if this STA/TID is on aggregation 1360 */ 1361 1362 if (!sta) 1363 goto dont_reorder; 1364 1365 ack_policy = *ieee80211_get_qos_ctl(hdr) & 1366 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 1367 tid = ieee80211_get_tid(hdr); 1368 1369 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 1370 if (!tid_agg_rx) { 1371 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1372 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 1373 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 1374 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 1375 WLAN_BACK_RECIPIENT, 1376 WLAN_REASON_QSTA_REQUIRE_SETUP); 1377 goto dont_reorder; 1378 } 1379 1380 /* qos null data frames are excluded */ 1381 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 1382 goto dont_reorder; 1383 1384 /* not part of a BA session */ 1385 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1386 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) 1387 goto dont_reorder; 1388 1389 /* new, potentially un-ordered, ampdu frame - process it */ 1390 1391 /* reset session timer */ 1392 if (tid_agg_rx->timeout) 1393 tid_agg_rx->last_rx = jiffies; 1394 1395 /* if this mpdu is fragmented - terminate rx aggregation session */ 1396 sc = le16_to_cpu(hdr->seq_ctrl); 1397 if (sc & IEEE80211_SCTL_FRAG) { 1398 skb_queue_tail(&rx->sdata->skb_queue, skb); 1399 ieee80211_queue_work(&local->hw, &rx->sdata->work); 1400 return; 1401 } 1402 1403 /* 1404 * No locking needed -- we will only ever process one 1405 * RX packet at a time, and thus own tid_agg_rx. All 1406 * other code manipulating it needs to (and does) make 1407 * sure that we cannot get to it any more before doing 1408 * anything with it. 
1409 */ 1410 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, 1411 frames)) 1412 return; 1413 1414 dont_reorder: 1415 __skb_queue_tail(frames, skb); 1416 } 1417 1418 static ieee80211_rx_result debug_noinline 1419 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) 1420 { 1421 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1422 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1423 1424 if (status->flag & RX_FLAG_DUP_VALIDATED) 1425 return RX_CONTINUE; 1426 1427 /* 1428 * Drop duplicate 802.11 retransmissions 1429 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") 1430 */ 1431 1432 if (rx->skb->len < 24) 1433 return RX_CONTINUE; 1434 1435 if (ieee80211_is_ctl(hdr->frame_control) || 1436 ieee80211_is_nullfunc(hdr->frame_control) || 1437 ieee80211_is_qos_nullfunc(hdr->frame_control) || 1438 is_multicast_ether_addr(hdr->addr1)) 1439 return RX_CONTINUE; 1440 1441 if (!rx->sta) 1442 return RX_CONTINUE; 1443 1444 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 1445 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { 1446 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); 1447 rx->sta->rx_stats.num_duplicates++; 1448 return RX_DROP_UNUSABLE; 1449 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1450 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 1451 } 1452 1453 return RX_CONTINUE; 1454 } 1455 1456 static ieee80211_rx_result debug_noinline 1457 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 1458 { 1459 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1460 1461 /* Drop disallowed frame classes based on STA auth/assoc state; 1462 * IEEE 802.11, Chap 5.5. 1463 * 1464 * mac80211 filters only based on association state, i.e. it drops 1465 * Class 3 frames from not associated stations. hostapd sends 1466 * deauth/disassoc frames when needed. In addition, hostapd is 1467 * responsible for filtering on both auth and assoc states. 
 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		struct ieee80211_txq *txq = sta->sta.txq[tid];
		struct txq_info *txqi = to_txq_info(txq);

		spin_lock(&local->active_txq_lock[txq->ac]);
		if (!list_empty(&txqi->schedule_order))
			list_del_init(&txqi->schedule_order);
		spin_unlock(&local->active_txq_lock[txq->ac]);

		if (txq_has_queue(txq))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
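	/*
	 * If the driver still has WLAN_STA_PS_DRIVER set, only clear
	 * WLAN_STA_PS_STA here; ieee80211_sta_ps_deliver_wakeup() will
	 * run once the driver unblocks the station. Otherwise mark the
	 * station as delivering and flush the buffered frames right away.
	 */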
1588 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", 1589 sta->sta.addr, sta->sta.aid); 1590 1591 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1592 /* 1593 * Clear the flag only if the other one is still set 1594 * so that the TX path won't start TX'ing new frames 1595 * directly ... In the case that the driver flag isn't 1596 * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1597 */ 1598 clear_sta_flag(sta, WLAN_STA_PS_STA); 1599 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1600 sta->sta.addr, sta->sta.aid); 1601 return; 1602 } 1603 1604 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1605 clear_sta_flag(sta, WLAN_STA_PS_STA); 1606 ieee80211_sta_ps_deliver_wakeup(sta); 1607 } 1608 1609 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) 1610 { 1611 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1612 bool in_ps; 1613 1614 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); 1615 1616 /* Don't let the same PS state be set twice */ 1617 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); 1618 if ((start && in_ps) || (!start && !in_ps)) 1619 return -EINVAL; 1620 1621 if (start) 1622 sta_ps_start(sta); 1623 else 1624 sta_ps_end(sta); 1625 1626 return 0; 1627 } 1628 EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1629 1630 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) 1631 { 1632 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1633 1634 if (test_sta_flag(sta, WLAN_STA_SP)) 1635 return; 1636 1637 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1638 ieee80211_sta_ps_deliver_poll_response(sta); 1639 else 1640 set_sta_flag(sta, WLAN_STA_PSPOLL); 1641 } 1642 EXPORT_SYMBOL(ieee80211_sta_pspoll); 1643 1644 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) 1645 { 1646 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1647 int ac = ieee80211_ac_from_tid(tid); 1648 1649 /* 1650 * If this AC is not trigger-enabled do nothing unless the 1651 * driver is calling us after it already checked. 1652 * 1653 * NB: This could/should check a separate bitmap of trigger- 1654 * enabled queues, but for now we only implement uAPSD w/o 1655 * TSPEC changes to the ACs, so they're always the same. 1656 */ 1657 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) && 1658 tid != IEEE80211_NUM_TIDS) 1659 return; 1660 1661 /* if we are in a service period, do nothing */ 1662 if (test_sta_flag(sta, WLAN_STA_SP)) 1663 return; 1664 1665 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1666 ieee80211_sta_ps_deliver_uapsd(sta); 1667 else 1668 set_sta_flag(sta, WLAN_STA_UAPSD); 1669 } 1670 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); 1671 1672 static ieee80211_rx_result debug_noinline 1673 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) 1674 { 1675 struct ieee80211_sub_if_data *sdata = rx->sdata; 1676 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 1677 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1678 1679 if (!rx->sta) 1680 return RX_CONTINUE; 1681 1682 if (sdata->vif.type != NL80211_IFTYPE_AP && 1683 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 1684 return RX_CONTINUE; 1685 1686 /* 1687 * The device handles station powersave, so don't do anything about 1688 * uAPSD and PS-Poll frames (the latter shouldn't even come up from 1689 * it to mac80211 since they're handled.) 1690 */ 1691 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS)) 1692 return RX_CONTINUE; 1693 1694 /* 1695 * Don't do anything if the station isn't already asleep. 
	 * In the uAPSD case, the station will probably be marked asleep;
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free the PS-Poll skb here instead of returning RX_DROP,
		 * which would count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for stations already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control))
			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_stats.fragments++;

	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
	sta->rx_stats.bytes += rx->skb->len;
	u64_stats_update_end(&rx->sta->rx_stats.syncp);

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
					-signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence, and only for a data or management
	 * frame as specified in IEEE 802.11-2016 11.2.3.2
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1) &&
	    (ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_data(hdr->frame_control)) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA VLAN yet, send
		 * the event to userspace and, for older hostapd, drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update the counter and free the packet here to avoid
		 * counting this as a dropped packet.
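		 * (Returning RX_QUEUED signals that the skb has already been consumed.)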
1847 */ 1848 sta->rx_stats.packets++; 1849 dev_kfree_skb(rx->skb); 1850 return RX_QUEUED; 1851 } 1852 1853 return RX_CONTINUE; 1854 } /* ieee80211_rx_h_sta_process */ 1855 1856 static ieee80211_rx_result debug_noinline 1857 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 1858 { 1859 struct sk_buff *skb = rx->skb; 1860 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1861 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1862 int keyidx; 1863 int hdrlen; 1864 ieee80211_rx_result result = RX_DROP_UNUSABLE; 1865 struct ieee80211_key *sta_ptk = NULL; 1866 int mmie_keyidx = -1; 1867 __le16 fc; 1868 const struct ieee80211_cipher_scheme *cs = NULL; 1869 1870 /* 1871 * Key selection 101 1872 * 1873 * There are four types of keys: 1874 * - GTK (group keys) 1875 * - IGTK (group keys for management frames) 1876 * - PTK (pairwise keys) 1877 * - STK (station-to-station pairwise keys) 1878 * 1879 * When selecting a key, we have to distinguish between multicast 1880 * (including broadcast) and unicast frames, the latter can only 1881 * use PTKs and STKs while the former always use GTKs and IGTKs. 1882 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then 1883 * unicast frames can also use key indices like GTKs. Hence, if we 1884 * don't have a PTK/STK we check the key index for a WEP key. 1885 * 1886 * Note that in a regular BSS, multicast frames are sent by the 1887 * AP only, associated stations unicast the frame to the AP first 1888 * which then multicasts it on their behalf. 1889 * 1890 * There is also a slight problem in IBSS mode: GTKs are negotiated 1891 * with each station, that is something we don't currently handle. 1892 * The spec seems to expect that one negotiates the same key with 1893 * every station but there's no such requirement; VLANs could be 1894 * possible. 1895 */ 1896 1897 /* start without a key */ 1898 rx->key = NULL; 1899 fc = hdr->frame_control; 1900 1901 if (rx->sta) { 1902 int keyid = rx->sta->ptk_idx; 1903 1904 if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) { 1905 cs = rx->sta->cipher_scheme; 1906 keyid = ieee80211_get_cs_keyid(cs, rx->skb); 1907 if (unlikely(keyid < 0)) 1908 return RX_DROP_UNUSABLE; 1909 } 1910 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 1911 } 1912 1913 if (!ieee80211_has_protected(fc)) 1914 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 1915 1916 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 1917 rx->key = sta_ptk; 1918 if ((status->flag & RX_FLAG_DECRYPTED) && 1919 (status->flag & RX_FLAG_IV_STRIPPED)) 1920 return RX_CONTINUE; 1921 /* Skip decryption if the frame is not protected. */ 1922 if (!ieee80211_has_protected(fc)) 1923 return RX_CONTINUE; 1924 } else if (mmie_keyidx >= 0) { 1925 /* Broadcast/multicast robust management frame / BIP */ 1926 if ((status->flag & RX_FLAG_DECRYPTED) && 1927 (status->flag & RX_FLAG_IV_STRIPPED)) 1928 return RX_CONTINUE; 1929 1930 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1931 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1932 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1933 if (rx->sta) { 1934 if (ieee80211_is_group_privacy_action(skb) && 1935 test_sta_flag(rx->sta, WLAN_STA_MFP)) 1936 return RX_DROP_MONITOR; 1937 1938 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 1939 } 1940 if (!rx->key) 1941 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 1942 } else if (!ieee80211_has_protected(fc)) { 1943 /* 1944 * The frame was not protected, so skip decryption. 
However, we 1945 * need to set rx->key if there is a key that could have been 1946 * used so that the frame may be dropped if encryption would 1947 * have been expected. 1948 */ 1949 struct ieee80211_key *key = NULL; 1950 struct ieee80211_sub_if_data *sdata = rx->sdata; 1951 int i; 1952 1953 if (ieee80211_is_mgmt(fc) && 1954 is_multicast_ether_addr(hdr->addr1) && 1955 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 1956 rx->key = key; 1957 else { 1958 if (rx->sta) { 1959 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1960 key = rcu_dereference(rx->sta->gtk[i]); 1961 if (key) 1962 break; 1963 } 1964 } 1965 if (!key) { 1966 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1967 key = rcu_dereference(sdata->keys[i]); 1968 if (key) 1969 break; 1970 } 1971 } 1972 if (key) 1973 rx->key = key; 1974 } 1975 return RX_CONTINUE; 1976 } else { 1977 u8 keyid; 1978 1979 /* 1980 * The device doesn't give us the IV so we won't be 1981 * able to look up the key. That's ok though, we 1982 * don't need to decrypt the frame, we just won't 1983 * be able to keep statistics accurate. 1984 * Except for key threshold notifications, should 1985 * we somehow allow the driver to tell us which key 1986 * the hardware used if this flag is set? 1987 */ 1988 if ((status->flag & RX_FLAG_DECRYPTED) && 1989 (status->flag & RX_FLAG_IV_STRIPPED)) 1990 return RX_CONTINUE; 1991 1992 hdrlen = ieee80211_hdrlen(fc); 1993 1994 if (cs) { 1995 keyidx = ieee80211_get_cs_keyid(cs, rx->skb); 1996 1997 if (unlikely(keyidx < 0)) 1998 return RX_DROP_UNUSABLE; 1999 } else { 2000 if (rx->skb->len < 8 + hdrlen) 2001 return RX_DROP_UNUSABLE; /* TODO: count this? */ 2002 /* 2003 * no need to call ieee80211_wep_get_keyidx, 2004 * it verifies a bunch of things we've done already 2005 */ 2006 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 2007 keyidx = keyid >> 6; 2008 } 2009 2010 /* check per-station GTK first, if multicast packet */ 2011 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 2012 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 2013 2014 /* if not found, try default key */ 2015 if (!rx->key) { 2016 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2017 2018 /* 2019 * RSNA-protected unicast frames should always be 2020 * sent with pairwise or station-to-station keys, 2021 * but for WEP we allow using a key index as well. 
2022 */ 2023 if (rx->key && 2024 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2025 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2026 !is_multicast_ether_addr(hdr->addr1)) 2027 rx->key = NULL; 2028 } 2029 } 2030 2031 if (rx->key) { 2032 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2033 return RX_DROP_MONITOR; 2034 2035 /* TODO: add threshold stuff again */ 2036 } else { 2037 return RX_DROP_MONITOR; 2038 } 2039 2040 switch (rx->key->conf.cipher) { 2041 case WLAN_CIPHER_SUITE_WEP40: 2042 case WLAN_CIPHER_SUITE_WEP104: 2043 result = ieee80211_crypto_wep_decrypt(rx); 2044 break; 2045 case WLAN_CIPHER_SUITE_TKIP: 2046 result = ieee80211_crypto_tkip_decrypt(rx); 2047 break; 2048 case WLAN_CIPHER_SUITE_CCMP: 2049 result = ieee80211_crypto_ccmp_decrypt( 2050 rx, IEEE80211_CCMP_MIC_LEN); 2051 break; 2052 case WLAN_CIPHER_SUITE_CCMP_256: 2053 result = ieee80211_crypto_ccmp_decrypt( 2054 rx, IEEE80211_CCMP_256_MIC_LEN); 2055 break; 2056 case WLAN_CIPHER_SUITE_AES_CMAC: 2057 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2058 break; 2059 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2060 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2061 break; 2062 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2063 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2064 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2065 break; 2066 case WLAN_CIPHER_SUITE_GCMP: 2067 case WLAN_CIPHER_SUITE_GCMP_256: 2068 result = ieee80211_crypto_gcmp_decrypt(rx); 2069 break; 2070 default: 2071 result = ieee80211_crypto_hw_decrypt(rx); 2072 } 2073 2074 /* the hdr variable is invalid after the decrypt handlers */ 2075 2076 /* either the frame has been decrypted or will be dropped */ 2077 status->flag |= RX_FLAG_DECRYPTED; 2078 2079 return result; 2080 } 2081 2082 static inline struct ieee80211_fragment_entry * 2083 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 2084 unsigned int frag, unsigned int seq, int rx_queue, 2085 struct sk_buff **skb) 2086 { 2087 struct ieee80211_fragment_entry *entry; 2088 2089 entry = &sdata->fragments[sdata->fragment_next++]; 2090 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 2091 sdata->fragment_next = 0; 2092 2093 if (!skb_queue_empty(&entry->skb_list)) 2094 __skb_queue_purge(&entry->skb_list); 2095 2096 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2097 *skb = NULL; 2098 entry->first_frag_time = jiffies; 2099 entry->seq = seq; 2100 entry->rx_queue = rx_queue; 2101 entry->last_frag = frag; 2102 entry->check_sequential_pn = false; 2103 entry->extra_len = 0; 2104 2105 return entry; 2106 } 2107 2108 static inline struct ieee80211_fragment_entry * 2109 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 2110 unsigned int frag, unsigned int seq, 2111 int rx_queue, struct ieee80211_hdr *hdr) 2112 { 2113 struct ieee80211_fragment_entry *entry; 2114 int i, idx; 2115 2116 idx = sdata->fragment_next; 2117 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2118 struct ieee80211_hdr *f_hdr; 2119 struct sk_buff *f_skb; 2120 2121 idx--; 2122 if (idx < 0) 2123 idx = IEEE80211_FRAGMENT_MAX - 1; 2124 2125 entry = &sdata->fragments[idx]; 2126 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2127 entry->rx_queue != rx_queue || 2128 entry->last_frag + 1 != frag) 2129 continue; 2130 2131 f_skb = __skb_peek(&entry->skb_list); 2132 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2133 2134 /* 2135 * Check ftype and addresses are equal, else check next fragment 2136 */ 2137 if (((hdr->frame_control ^ f_hdr->frame_control) & 2138 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2139 
!ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2140 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2141 continue; 2142 2143 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2144 __skb_queue_purge(&entry->skb_list); 2145 continue; 2146 } 2147 return entry; 2148 } 2149 2150 return NULL; 2151 } 2152 2153 static ieee80211_rx_result debug_noinline 2154 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2155 { 2156 struct ieee80211_hdr *hdr; 2157 u16 sc; 2158 __le16 fc; 2159 unsigned int frag, seq; 2160 struct ieee80211_fragment_entry *entry; 2161 struct sk_buff *skb; 2162 2163 hdr = (struct ieee80211_hdr *)rx->skb->data; 2164 fc = hdr->frame_control; 2165 2166 if (ieee80211_is_ctl(fc)) 2167 return RX_CONTINUE; 2168 2169 sc = le16_to_cpu(hdr->seq_ctrl); 2170 frag = sc & IEEE80211_SCTL_FRAG; 2171 2172 if (is_multicast_ether_addr(hdr->addr1)) { 2173 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); 2174 goto out_no_led; 2175 } 2176 2177 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2178 goto out; 2179 2180 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2181 2182 if (skb_linearize(rx->skb)) 2183 return RX_DROP_UNUSABLE; 2184 2185 /* 2186 * skb_linearize() might change the skb->data and 2187 * previously cached variables (in this case, hdr) need to 2188 * be refreshed with the new data. 2189 */ 2190 hdr = (struct ieee80211_hdr *)rx->skb->data; 2191 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2192 2193 if (frag == 0) { 2194 /* This is the first fragment of a new frame. */ 2195 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 2196 rx->seqno_idx, &(rx->skb)); 2197 if (rx->key && 2198 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2199 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2200 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2201 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2202 ieee80211_has_protected(fc)) { 2203 int queue = rx->security_idx; 2204 2205 /* Store CCMP/GCMP PN so that we can verify that the 2206 * next fragment has a sequential PN value. 2207 */ 2208 entry->check_sequential_pn = true; 2209 memcpy(entry->last_pn, 2210 rx->key->u.ccmp.rx_pn[queue], 2211 IEEE80211_CCMP_PN_LEN); 2212 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2213 u.ccmp.rx_pn) != 2214 offsetof(struct ieee80211_key, 2215 u.gcmp.rx_pn)); 2216 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2217 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2218 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2219 IEEE80211_GCMP_PN_LEN); 2220 } 2221 return RX_QUEUED; 2222 } 2223 2224 /* This is a fragment for a frame that should already be pending in 2225 * fragment cache. Add this fragment to the end of the pending entry. 2226 */ 2227 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 2228 rx->seqno_idx, hdr); 2229 if (!entry) { 2230 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2231 return RX_DROP_MONITOR; 2232 } 2233 2234 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2235 * MPDU PN values are not incrementing in steps of 1." 
2236 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2237 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2238 */ 2239 if (entry->check_sequential_pn) { 2240 int i; 2241 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2242 int queue; 2243 2244 if (!rx->key || 2245 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 2246 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && 2247 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && 2248 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) 2249 return RX_DROP_UNUSABLE; 2250 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2251 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2252 pn[i]++; 2253 if (pn[i]) 2254 break; 2255 } 2256 queue = rx->security_idx; 2257 rpn = rx->key->u.ccmp.rx_pn[queue]; 2258 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2259 return RX_DROP_UNUSABLE; 2260 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2261 } 2262 2263 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2264 __skb_queue_tail(&entry->skb_list, rx->skb); 2265 entry->last_frag = frag; 2266 entry->extra_len += rx->skb->len; 2267 if (ieee80211_has_morefrags(fc)) { 2268 rx->skb = NULL; 2269 return RX_QUEUED; 2270 } 2271 2272 rx->skb = __skb_dequeue(&entry->skb_list); 2273 if (skb_tailroom(rx->skb) < entry->extra_len) { 2274 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2275 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2276 GFP_ATOMIC))) { 2277 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2278 __skb_queue_purge(&entry->skb_list); 2279 return RX_DROP_UNUSABLE; 2280 } 2281 } 2282 while ((skb = __skb_dequeue(&entry->skb_list))) { 2283 skb_put_data(rx->skb, skb->data, skb->len); 2284 dev_kfree_skb(skb); 2285 } 2286 2287 out: 2288 ieee80211_led_rx(rx->local); 2289 out_no_led: 2290 if (rx->sta) 2291 rx->sta->rx_stats.packets++; 2292 return RX_CONTINUE; 2293 } 2294 2295 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2296 { 2297 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2298 return -EACCES; 2299 2300 return 0; 2301 } 2302 2303 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2304 { 2305 struct sk_buff *skb = rx->skb; 2306 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2307 2308 /* 2309 * Pass through unencrypted frames if the hardware has 2310 * decrypted them already. 2311 */ 2312 if (status->flag & RX_FLAG_DECRYPTED) 2313 return 0; 2314 2315 /* Drop unencrypted frames if key is set. */ 2316 if (unlikely(!ieee80211_has_protected(fc) && 2317 !ieee80211_is_nullfunc(fc) && 2318 ieee80211_is_data(fc) && rx->key)) 2319 return -EACCES; 2320 2321 return 0; 2322 } 2323 2324 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2325 { 2326 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2327 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2328 __le16 fc = hdr->frame_control; 2329 2330 /* 2331 * Pass through unencrypted frames if the hardware has 2332 * decrypted them already. 
2333 */ 2334 if (status->flag & RX_FLAG_DECRYPTED) 2335 return 0; 2336 2337 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2338 if (unlikely(!ieee80211_has_protected(fc) && 2339 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 2340 rx->key)) { 2341 if (ieee80211_is_deauth(fc) || 2342 ieee80211_is_disassoc(fc)) 2343 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2344 rx->skb->data, 2345 rx->skb->len); 2346 return -EACCES; 2347 } 2348 /* BIP does not use Protected field, so need to check MMIE */ 2349 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2350 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2351 if (ieee80211_is_deauth(fc) || 2352 ieee80211_is_disassoc(fc)) 2353 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2354 rx->skb->data, 2355 rx->skb->len); 2356 return -EACCES; 2357 } 2358 /* 2359 * When using MFP, Action frames are not allowed prior to 2360 * having configured keys. 2361 */ 2362 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2363 ieee80211_is_robust_mgmt_frame(rx->skb))) 2364 return -EACCES; 2365 } 2366 2367 return 0; 2368 } 2369 2370 static int 2371 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2372 { 2373 struct ieee80211_sub_if_data *sdata = rx->sdata; 2374 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2375 bool check_port_control = false; 2376 struct ethhdr *ehdr; 2377 int ret; 2378 2379 *port_control = false; 2380 if (ieee80211_has_a4(hdr->frame_control) && 2381 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2382 return -1; 2383 2384 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2385 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2386 2387 if (!sdata->u.mgd.use_4addr) 2388 return -1; 2389 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2390 check_port_control = true; 2391 } 2392 2393 if (is_multicast_ether_addr(hdr->addr1) && 2394 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2395 return -1; 2396 2397 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2398 if (ret < 0) 2399 return ret; 2400 2401 ehdr = (struct ethhdr *) rx->skb->data; 2402 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2403 *port_control = true; 2404 else if (check_port_control) 2405 return -1; 2406 2407 return 0; 2408 } 2409 2410 /* 2411 * requires that rx->skb is a frame with ethernet header 2412 */ 2413 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2414 { 2415 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2416 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2417 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2418 2419 /* 2420 * Allow EAPOL frames to us/the PAE group address regardless 2421 * of whether the frame was encrypted or not. 
2422 */ 2423 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2424 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2425 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2426 return true; 2427 2428 if (ieee80211_802_1x_port_control(rx) || 2429 ieee80211_drop_unencrypted(rx, fc)) 2430 return false; 2431 2432 return true; 2433 } 2434 2435 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2436 struct ieee80211_rx_data *rx) 2437 { 2438 struct ieee80211_sub_if_data *sdata = rx->sdata; 2439 struct net_device *dev = sdata->dev; 2440 2441 if (unlikely((skb->protocol == sdata->control_port_protocol || 2442 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && 2443 sdata->control_port_over_nl80211)) { 2444 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2445 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2446 2447 cfg80211_rx_control_port(dev, skb, noencrypt); 2448 dev_kfree_skb(skb); 2449 } else { 2450 /* deliver to local stack */ 2451 if (rx->napi) 2452 napi_gro_receive(rx->napi, skb); 2453 else 2454 netif_receive_skb(skb); 2455 } 2456 } 2457 2458 /* 2459 * requires that rx->skb is a frame with ethernet header 2460 */ 2461 static void 2462 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2463 { 2464 struct ieee80211_sub_if_data *sdata = rx->sdata; 2465 struct net_device *dev = sdata->dev; 2466 struct sk_buff *skb, *xmit_skb; 2467 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2468 struct sta_info *dsta; 2469 2470 skb = rx->skb; 2471 xmit_skb = NULL; 2472 2473 ieee80211_rx_stats(dev, skb->len); 2474 2475 if (rx->sta) { 2476 /* The seqno index has the same property as needed 2477 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2478 * for non-QoS-data frames. Here we know it's a data 2479 * frame, so count MSDUs. 2480 */ 2481 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 2482 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2483 u64_stats_update_end(&rx->sta->rx_stats.syncp); 2484 } 2485 2486 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2487 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2488 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2489 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2490 if (is_multicast_ether_addr(ehdr->h_dest) && 2491 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2492 /* 2493 * send multicast frames both to higher layers in 2494 * local net stack and back to the wireless medium 2495 */ 2496 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2497 if (!xmit_skb) 2498 net_info_ratelimited("%s: failed to clone multicast frame\n", 2499 dev->name); 2500 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2501 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2502 dsta = sta_info_get(sdata, ehdr->h_dest); 2503 if (dsta) { 2504 /* 2505 * The destination station is associated to 2506 * this AP (in this VLAN), so send the frame 2507 * directly to it and do not pass it to local 2508 * net stack. 2509 */ 2510 xmit_skb = skb; 2511 skb = NULL; 2512 } 2513 } 2514 } 2515 2516 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2517 if (skb) { 2518 /* 'align' will only take the values 0 or 2 here since all 2519 * frames are required to be aligned to 2-byte boundaries 2520 * when being passed to mac80211; the code here works just 2521 * as well if that isn't true, but mac80211 assumes it can 2522 * access fields as 2-byte aligned (e.g. 
for ether_addr_equal) 2523 */ 2524 int align; 2525 2526 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2527 if (align) { 2528 if (WARN_ON(skb_headroom(skb) < 3)) { 2529 dev_kfree_skb(skb); 2530 skb = NULL; 2531 } else { 2532 u8 *data = skb->data; 2533 size_t len = skb_headlen(skb); 2534 skb->data -= align; 2535 memmove(skb->data, data, len); 2536 skb_set_tail_pointer(skb, len); 2537 } 2538 } 2539 } 2540 #endif 2541 2542 if (skb) { 2543 skb->protocol = eth_type_trans(skb, dev); 2544 memset(skb->cb, 0, sizeof(skb->cb)); 2545 2546 ieee80211_deliver_skb_to_local_stack(skb, rx); 2547 } 2548 2549 if (xmit_skb) { 2550 /* 2551 * Send to wireless media and increase priority by 256 to 2552 * keep the received priority instead of reclassifying 2553 * the frame (see cfg80211_classify8021d). 2554 */ 2555 xmit_skb->priority += 256; 2556 xmit_skb->protocol = htons(ETH_P_802_3); 2557 skb_reset_network_header(xmit_skb); 2558 skb_reset_mac_header(xmit_skb); 2559 dev_queue_xmit(xmit_skb); 2560 } 2561 } 2562 2563 static ieee80211_rx_result debug_noinline 2564 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) 2565 { 2566 struct net_device *dev = rx->sdata->dev; 2567 struct sk_buff *skb = rx->skb; 2568 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2569 __le16 fc = hdr->frame_control; 2570 struct sk_buff_head frame_list; 2571 struct ethhdr ethhdr; 2572 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; 2573 2574 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 2575 check_da = NULL; 2576 check_sa = NULL; 2577 } else switch (rx->sdata->vif.type) { 2578 case NL80211_IFTYPE_AP: 2579 case NL80211_IFTYPE_AP_VLAN: 2580 check_da = NULL; 2581 break; 2582 case NL80211_IFTYPE_STATION: 2583 if (!rx->sta || 2584 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) 2585 check_sa = NULL; 2586 break; 2587 case NL80211_IFTYPE_MESH_POINT: 2588 check_sa = NULL; 2589 break; 2590 default: 2591 break; 2592 } 2593 2594 skb->dev = dev; 2595 __skb_queue_head_init(&frame_list); 2596 2597 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr, 2598 rx->sdata->vif.addr, 2599 rx->sdata->vif.type, 2600 data_offset)) 2601 return RX_DROP_UNUSABLE; 2602 2603 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2604 rx->sdata->vif.type, 2605 rx->local->hw.extra_tx_headroom, 2606 check_da, check_sa); 2607 2608 while (!skb_queue_empty(&frame_list)) { 2609 rx->skb = __skb_dequeue(&frame_list); 2610 2611 if (!ieee80211_frame_allowed(rx, fc)) { 2612 dev_kfree_skb(rx->skb); 2613 continue; 2614 } 2615 2616 ieee80211_deliver_skb(rx); 2617 } 2618 2619 return RX_QUEUED; 2620 } 2621 2622 static ieee80211_rx_result debug_noinline 2623 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2624 { 2625 struct sk_buff *skb = rx->skb; 2626 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2627 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2628 __le16 fc = hdr->frame_control; 2629 2630 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2631 return RX_CONTINUE; 2632 2633 if (unlikely(!ieee80211_is_data(fc))) 2634 return RX_CONTINUE; 2635 2636 if (unlikely(!ieee80211_is_data_present(fc))) 2637 return RX_DROP_MONITOR; 2638 2639 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 2640 switch (rx->sdata->vif.type) { 2641 case NL80211_IFTYPE_AP_VLAN: 2642 if (!rx->sdata->u.vlan.sta) 2643 return RX_DROP_UNUSABLE; 2644 break; 2645 case NL80211_IFTYPE_STATION: 2646 if (!rx->sdata->u.mgd.use_4addr) 2647 return RX_DROP_UNUSABLE; 2648 break; 2649 default: 2650 return RX_DROP_UNUSABLE; 2651 }
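	/*
	 * Note on the 4-address check above: A-MSDU-bearing frames in the
	 * 4-address format are only accepted when the receiving interface
	 * is genuinely configured for 4-address operation -- an AP_VLAN
	 * with a 4-addr station attached, or a managed interface with
	 * use_4addr set. Anything else is dropped as unusable, presumably
	 * because the inner 802.3 subframes carry their own DA/SA and
	 * would otherwise bypass the normal address checks.
	 */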
2652 } 2653 2654 if (is_multicast_ether_addr(hdr->addr1)) 2655 return RX_DROP_UNUSABLE; 2656 2657 return __ieee80211_rx_h_amsdu(rx, 0); 2658 } 2659 2660 #ifdef CONFIG_MAC80211_MESH 2661 static ieee80211_rx_result 2662 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2663 { 2664 struct ieee80211_hdr *fwd_hdr, *hdr; 2665 struct ieee80211_tx_info *info; 2666 struct ieee80211s_hdr *mesh_hdr; 2667 struct sk_buff *skb = rx->skb, *fwd_skb; 2668 struct ieee80211_local *local = rx->local; 2669 struct ieee80211_sub_if_data *sdata = rx->sdata; 2670 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2671 u16 ac, q, hdrlen; 2672 int tailroom = 0; 2673 2674 hdr = (struct ieee80211_hdr *) skb->data; 2675 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2676 2677 /* make sure fixed part of mesh header is there, also checks skb len */ 2678 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2679 return RX_DROP_MONITOR; 2680 2681 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2682 2683 /* make sure full mesh header is there, also checks skb len */ 2684 if (!pskb_may_pull(rx->skb, 2685 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2686 return RX_DROP_MONITOR; 2687 2688 /* reload pointers */ 2689 hdr = (struct ieee80211_hdr *) skb->data; 2690 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2691 2692 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2693 return RX_DROP_MONITOR; 2694 2695 /* frame is in RMC, don't forward */ 2696 if (ieee80211_is_data(hdr->frame_control) && 2697 is_multicast_ether_addr(hdr->addr1) && 2698 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2699 return RX_DROP_MONITOR; 2700 2701 if (!ieee80211_is_data(hdr->frame_control)) 2702 return RX_CONTINUE; 2703 2704 if (!mesh_hdr->ttl) 2705 return RX_DROP_MONITOR; 2706 2707 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2708 struct mesh_path *mppath; 2709 char *proxied_addr; 2710 char *mpp_addr; 2711 2712 if (is_multicast_ether_addr(hdr->addr1)) { 2713 mpp_addr = hdr->addr3; 2714 proxied_addr = mesh_hdr->eaddr1; 2715 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == 2716 MESH_FLAGS_AE_A5_A6) { 2717 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2718 mpp_addr = hdr->addr4; 2719 proxied_addr = mesh_hdr->eaddr2; 2720 } else { 2721 return RX_DROP_MONITOR; 2722 } 2723 2724 rcu_read_lock(); 2725 mppath = mpp_path_lookup(sdata, proxied_addr); 2726 if (!mppath) { 2727 mpp_path_add(sdata, proxied_addr, mpp_addr); 2728 } else { 2729 spin_lock_bh(&mppath->state_lock); 2730 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2731 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2732 mppath->exp_time = jiffies; 2733 spin_unlock_bh(&mppath->state_lock); 2734 } 2735 rcu_read_unlock(); 2736 } 2737 2738 /* Frame has reached destination. 
Don't forward */ 2739 if (!is_multicast_ether_addr(hdr->addr1) && 2740 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2741 return RX_CONTINUE; 2742 2743 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2744 q = sdata->vif.hw_queue[ac]; 2745 if (ieee80211_queue_stopped(&local->hw, q)) { 2746 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2747 return RX_DROP_MONITOR; 2748 } 2749 skb_set_queue_mapping(skb, q); 2750 2751 if (!--mesh_hdr->ttl) { 2752 if (!is_multicast_ether_addr(hdr->addr1)) 2753 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, 2754 dropped_frames_ttl); 2755 goto out; 2756 } 2757 2758 if (!ifmsh->mshcfg.dot11MeshForwarding) 2759 goto out; 2760 2761 if (sdata->crypto_tx_tailroom_needed_cnt) 2762 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2763 2764 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2765 sdata->encrypt_headroom, 2766 tailroom, GFP_ATOMIC); 2767 if (!fwd_skb) 2768 goto out; 2769 2770 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2771 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2772 info = IEEE80211_SKB_CB(fwd_skb); 2773 memset(info, 0, sizeof(*info)); 2774 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2775 info->control.vif = &rx->sdata->vif; 2776 info->control.jiffies = jiffies; 2777 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2778 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2779 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2780 /* update power mode indication when forwarding */ 2781 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2782 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2783 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2784 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2785 } else { 2786 /* unable to resolve next hop */ 2787 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2788 fwd_hdr->addr3, 0, 2789 WLAN_REASON_MESH_PATH_NOFORWARD, 2790 fwd_hdr->addr2); 2791 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2792 kfree_skb(fwd_skb); 2793 return RX_DROP_MONITOR; 2794 } 2795 2796 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2797 ieee80211_add_pending_skb(local, fwd_skb); 2798 out: 2799 if (is_multicast_ether_addr(hdr->addr1)) 2800 return RX_CONTINUE; 2801 return RX_DROP_MONITOR; 2802 } 2803 #endif 2804 2805 static ieee80211_rx_result debug_noinline 2806 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2807 { 2808 struct ieee80211_sub_if_data *sdata = rx->sdata; 2809 struct ieee80211_local *local = rx->local; 2810 struct net_device *dev = sdata->dev; 2811 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2812 __le16 fc = hdr->frame_control; 2813 bool port_control; 2814 int err; 2815 2816 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2817 return RX_CONTINUE; 2818 2819 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2820 return RX_DROP_MONITOR; 2821 2822 /* 2823 * Send unexpected-4addr-frame event to hostapd. For older versions, 2824 * also drop the frame to cooked monitor interfaces. 
2825 */ 2826 if (ieee80211_has_a4(hdr->frame_control) && 2827 sdata->vif.type == NL80211_IFTYPE_AP) { 2828 if (rx->sta && 2829 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2830 cfg80211_rx_unexpected_4addr_frame( 2831 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2832 return RX_DROP_MONITOR; 2833 } 2834 2835 err = __ieee80211_data_to_8023(rx, &port_control); 2836 if (unlikely(err)) 2837 return RX_DROP_UNUSABLE; 2838 2839 if (!ieee80211_frame_allowed(rx, fc)) 2840 return RX_DROP_MONITOR; 2841 2842 /* directly handle TDLS channel switch requests/responses */ 2843 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2844 cpu_to_be16(ETH_P_TDLS))) { 2845 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2846 2847 if (pskb_may_pull(rx->skb, 2848 offsetof(struct ieee80211_tdls_data, u)) && 2849 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2850 tf->category == WLAN_CATEGORY_TDLS && 2851 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2852 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2853 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 2854 schedule_work(&local->tdls_chsw_work); 2855 if (rx->sta) 2856 rx->sta->rx_stats.packets++; 2857 2858 return RX_QUEUED; 2859 } 2860 } 2861 2862 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2863 unlikely(port_control) && sdata->bss) { 2864 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2865 u.ap); 2866 dev = sdata->dev; 2867 rx->sdata = sdata; 2868 } 2869 2870 rx->skb->dev = dev; 2871 2872 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 2873 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2874 !is_multicast_ether_addr( 2875 ((struct ethhdr *)rx->skb->data)->h_dest) && 2876 (!local->scanning && 2877 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 2878 mod_timer(&local->dynamic_ps_timer, jiffies + 2879 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2880 2881 ieee80211_deliver_skb(rx); 2882 2883 return RX_QUEUED; 2884 } 2885 2886 static ieee80211_rx_result debug_noinline 2887 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2888 { 2889 struct sk_buff *skb = rx->skb; 2890 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2891 struct tid_ampdu_rx *tid_agg_rx; 2892 u16 start_seq_num; 2893 u16 tid; 2894 2895 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2896 return RX_CONTINUE; 2897 2898 if (ieee80211_is_back_req(bar->frame_control)) { 2899 struct { 2900 __le16 control, start_seq_num; 2901 } __packed bar_data; 2902 struct ieee80211_event event = { 2903 .type = BAR_RX_EVENT, 2904 }; 2905 2906 if (!rx->sta) 2907 return RX_DROP_MONITOR; 2908 2909 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2910 &bar_data, sizeof(bar_data))) 2911 return RX_DROP_MONITOR; 2912 2913 tid = le16_to_cpu(bar_data.control) >> 12; 2914 2915 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 2916 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 2917 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 2918 WLAN_BACK_RECIPIENT, 2919 WLAN_REASON_QSTA_REQUIRE_SETUP); 2920 2921 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2922 if (!tid_agg_rx) 2923 return RX_DROP_MONITOR; 2924 2925 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2926 event.u.ba.tid = tid; 2927 event.u.ba.ssn = start_seq_num; 2928 event.u.ba.sta = &rx->sta->sta; 2929 2930 /* reset session timer */ 2931 if (tid_agg_rx->timeout) 2932 mod_timer(&tid_agg_rx->session_timer, 2933 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2934 
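		/*
		 * Decoding sketch for the BAR fields used above, assuming
		 * the usual IEEE 802.11 field layout: the TID sits in the
		 * top four bits of the BAR control field, and the starting
		 * sequence number is stored shifted left by four because
		 * the low four bits of a sequence control field hold the
		 * fragment number.  With hypothetical values
		 *
		 *	bar_data.control       == cpu_to_le16(0x5004)
		 *	bar_data.start_seq_num == cpu_to_le16(0x01a0)
		 *
		 * the decode above yields tid == 5 and start_seq_num == 26
		 * (0x1a), and the release below then flushes all reordered
		 * frames up to the start of that BAR window for normal RX
		 * processing.
		 */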
2935 spin_lock(&tid_agg_rx->reorder_lock); 2936 /* release stored frames up to start of BAR */ 2937 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2938 start_seq_num, frames); 2939 spin_unlock(&tid_agg_rx->reorder_lock); 2940 2941 drv_event_callback(rx->local, rx->sdata, &event); 2942 2943 kfree_skb(skb); 2944 return RX_QUEUED; 2945 } 2946 2947 /* 2948 * After this point, we only want management frames, 2949 * so we can drop all remaining control frames to 2950 * cooked monitor interfaces. 2951 */ 2952 return RX_DROP_MONITOR; 2953 } 2954 2955 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2956 struct ieee80211_mgmt *mgmt, 2957 size_t len) 2958 { 2959 struct ieee80211_local *local = sdata->local; 2960 struct sk_buff *skb; 2961 struct ieee80211_mgmt *resp; 2962 2963 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2964 /* Not to own unicast address */ 2965 return; 2966 } 2967 2968 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2969 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2970 /* Not from the current AP or not associated yet. */ 2971 return; 2972 } 2973 2974 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2975 /* Too short SA Query request frame */ 2976 return; 2977 } 2978 2979 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2980 if (skb == NULL) 2981 return; 2982 2983 skb_reserve(skb, local->hw.extra_tx_headroom); 2984 resp = skb_put_zero(skb, 24); 2985 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2986 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2987 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2988 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2989 IEEE80211_STYPE_ACTION); 2990 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2991 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2992 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2993 memcpy(resp->u.action.u.sa_query.trans_id, 2994 mgmt->u.action.u.sa_query.trans_id, 2995 WLAN_SA_QUERY_TR_ID_LEN); 2996 2997 ieee80211_tx_skb(sdata, skb); 2998 } 2999 3000 static ieee80211_rx_result debug_noinline 3001 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3002 { 3003 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3004 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3005 3006 /* 3007 * From here on, look only at management frames. 3008 * Data and control frames are already handled, 3009 * and unknown (reserved) frames are useless. 
3010 */ 3011 if (rx->skb->len < 24) 3012 return RX_DROP_MONITOR; 3013 3014 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3015 return RX_DROP_MONITOR; 3016 3017 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3018 ieee80211_is_beacon(mgmt->frame_control) && 3019 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3020 int sig = 0; 3021 3022 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3023 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3024 sig = status->signal; 3025 3026 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 3027 rx->skb->data, rx->skb->len, 3028 status->freq, sig); 3029 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3030 } 3031 3032 if (ieee80211_drop_unencrypted_mgmt(rx)) 3033 return RX_DROP_UNUSABLE; 3034 3035 return RX_CONTINUE; 3036 } 3037 3038 static ieee80211_rx_result debug_noinline 3039 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3040 { 3041 struct ieee80211_local *local = rx->local; 3042 struct ieee80211_sub_if_data *sdata = rx->sdata; 3043 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3044 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3045 int len = rx->skb->len; 3046 3047 if (!ieee80211_is_action(mgmt->frame_control)) 3048 return RX_CONTINUE; 3049 3050 /* drop too small frames */ 3051 if (len < IEEE80211_MIN_ACTION_SIZE) 3052 return RX_DROP_UNUSABLE; 3053 3054 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3055 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3056 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3057 return RX_DROP_UNUSABLE; 3058 3059 switch (mgmt->u.action.category) { 3060 case WLAN_CATEGORY_HT: 3061 /* reject HT action frames from stations not supporting HT */ 3062 if (!rx->sta->sta.ht_cap.ht_supported) 3063 goto invalid; 3064 3065 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3066 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3067 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3068 sdata->vif.type != NL80211_IFTYPE_AP && 3069 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3070 break; 3071 3072 /* verify action & smps_control/chanwidth are present */ 3073 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3074 goto invalid; 3075 3076 switch (mgmt->u.action.u.ht_smps.action) { 3077 case WLAN_HT_ACTION_SMPS: { 3078 struct ieee80211_supported_band *sband; 3079 enum ieee80211_smps_mode smps_mode; 3080 struct sta_opmode_info sta_opmode = {}; 3081 3082 /* convert to HT capability */ 3083 switch (mgmt->u.action.u.ht_smps.smps_control) { 3084 case WLAN_HT_SMPS_CONTROL_DISABLED: 3085 smps_mode = IEEE80211_SMPS_OFF; 3086 break; 3087 case WLAN_HT_SMPS_CONTROL_STATIC: 3088 smps_mode = IEEE80211_SMPS_STATIC; 3089 break; 3090 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3091 smps_mode = IEEE80211_SMPS_DYNAMIC; 3092 break; 3093 default: 3094 goto invalid; 3095 } 3096 3097 /* if no change do nothing */ 3098 if (rx->sta->sta.smps_mode == smps_mode) 3099 goto handled; 3100 rx->sta->sta.smps_mode = smps_mode; 3101 sta_opmode.smps_mode = 3102 ieee80211_smps_mode_to_smps_mode(smps_mode); 3103 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3104 3105 sband = rx->local->hw.wiphy->bands[status->band]; 3106 3107 rate_control_rate_update(local, sband, rx->sta, 3108 IEEE80211_RC_SMPS_CHANGED); 3109 cfg80211_sta_opmode_change_notify(sdata->dev, 3110 rx->sta->addr, 3111 &sta_opmode, 3112 GFP_ATOMIC); 3113 goto handled; 3114 } 3115 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3116 struct ieee80211_supported_band *sband; 3117 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3118 enum ieee80211_sta_rx_bandwidth 
max_bw, new_bw; 3119 struct sta_opmode_info sta_opmode = {}; 3120 3121 /* If it doesn't support 40 MHz it can't change ... */ 3122 if (!(rx->sta->sta.ht_cap.cap & 3123 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3124 goto handled; 3125 3126 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 3127 max_bw = IEEE80211_STA_RX_BW_20; 3128 else 3129 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 3130 3131 /* set cur_max_bandwidth and recalc sta bw */ 3132 rx->sta->cur_max_bandwidth = max_bw; 3133 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 3134 3135 if (rx->sta->sta.bandwidth == new_bw) 3136 goto handled; 3137 3138 rx->sta->sta.bandwidth = new_bw; 3139 sband = rx->local->hw.wiphy->bands[status->band]; 3140 sta_opmode.bw = 3141 ieee80211_sta_rx_bw_to_chan_width(rx->sta); 3142 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; 3143 3144 rate_control_rate_update(local, sband, rx->sta, 3145 IEEE80211_RC_BW_CHANGED); 3146 cfg80211_sta_opmode_change_notify(sdata->dev, 3147 rx->sta->addr, 3148 &sta_opmode, 3149 GFP_ATOMIC); 3150 goto handled; 3151 } 3152 default: 3153 goto invalid; 3154 } 3155 3156 break; 3157 case WLAN_CATEGORY_PUBLIC: 3158 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3159 goto invalid; 3160 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3161 break; 3162 if (!rx->sta) 3163 break; 3164 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 3165 break; 3166 if (mgmt->u.action.u.ext_chan_switch.action_code != 3167 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3168 break; 3169 if (len < offsetof(struct ieee80211_mgmt, 3170 u.action.u.ext_chan_switch.variable)) 3171 goto invalid; 3172 goto queue; 3173 case WLAN_CATEGORY_VHT: 3174 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3175 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3176 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3177 sdata->vif.type != NL80211_IFTYPE_AP && 3178 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3179 break; 3180 3181 /* verify action code is present */ 3182 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3183 goto invalid; 3184 3185 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3186 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3187 /* verify opmode is present */ 3188 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3189 goto invalid; 3190 goto queue; 3191 } 3192 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3193 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3194 goto invalid; 3195 goto queue; 3196 } 3197 default: 3198 break; 3199 } 3200 break; 3201 case WLAN_CATEGORY_BACK: 3202 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3203 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3204 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3205 sdata->vif.type != NL80211_IFTYPE_AP && 3206 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3207 break; 3208 3209 /* verify action_code is present */ 3210 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3211 break; 3212 3213 switch (mgmt->u.action.u.addba_req.action_code) { 3214 case WLAN_ACTION_ADDBA_REQ: 3215 if (len < (IEEE80211_MIN_ACTION_SIZE + 3216 sizeof(mgmt->u.action.u.addba_req))) 3217 goto invalid; 3218 break; 3219 case WLAN_ACTION_ADDBA_RESP: 3220 if (len < (IEEE80211_MIN_ACTION_SIZE + 3221 sizeof(mgmt->u.action.u.addba_resp))) 3222 goto invalid; 3223 break; 3224 case WLAN_ACTION_DELBA: 3225 if (len < (IEEE80211_MIN_ACTION_SIZE + 3226 sizeof(mgmt->u.action.u.delba))) 3227 goto invalid; 3228 break; 3229 default: 3230 goto invalid; 3231 } 3232 3233 goto queue; 3234 case WLAN_CATEGORY_SPECTRUM_MGMT: 3235 /* verify action_code is present */ 3236 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3237 break; 3238 3239 switch (mgmt->u.action.u.measurement.action_code) 
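		/*
		 * Spectrum management actions: measurement requests are
		 * handled inline below (station interfaces on the 5 GHz
		 * band only), while validated channel switch announcements
		 * are queued to the interface work for further processing.
		 */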
{ 3240 case WLAN_ACTION_SPCT_MSR_REQ: 3241 if (status->band != NL80211_BAND_5GHZ) 3242 break; 3243 3244 if (len < (IEEE80211_MIN_ACTION_SIZE + 3245 sizeof(mgmt->u.action.u.measurement))) 3246 break; 3247 3248 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3249 break; 3250 3251 ieee80211_process_measurement_req(sdata, mgmt, len); 3252 goto handled; 3253 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3254 u8 *bssid; 3255 if (len < (IEEE80211_MIN_ACTION_SIZE + 3256 sizeof(mgmt->u.action.u.chan_switch))) 3257 break; 3258 3259 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3260 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3261 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3262 break; 3263 3264 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3265 bssid = sdata->u.mgd.bssid; 3266 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3267 bssid = sdata->u.ibss.bssid; 3268 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3269 bssid = mgmt->sa; 3270 else 3271 break; 3272 3273 if (!ether_addr_equal(mgmt->bssid, bssid)) 3274 break; 3275 3276 goto queue; 3277 } 3278 } 3279 break; 3280 case WLAN_CATEGORY_SA_QUERY: 3281 if (len < (IEEE80211_MIN_ACTION_SIZE + 3282 sizeof(mgmt->u.action.u.sa_query))) 3283 break; 3284 3285 switch (mgmt->u.action.u.sa_query.action) { 3286 case WLAN_ACTION_SA_QUERY_REQUEST: 3287 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3288 break; 3289 ieee80211_process_sa_query_req(sdata, mgmt, len); 3290 goto handled; 3291 } 3292 break; 3293 case WLAN_CATEGORY_SELF_PROTECTED: 3294 if (len < (IEEE80211_MIN_ACTION_SIZE + 3295 sizeof(mgmt->u.action.u.self_prot.action_code))) 3296 break; 3297 3298 switch (mgmt->u.action.u.self_prot.action_code) { 3299 case WLAN_SP_MESH_PEERING_OPEN: 3300 case WLAN_SP_MESH_PEERING_CLOSE: 3301 case WLAN_SP_MESH_PEERING_CONFIRM: 3302 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3303 goto invalid; 3304 if (sdata->u.mesh.user_mpm) 3305 /* userspace handles this frame */ 3306 break; 3307 goto queue; 3308 case WLAN_SP_MGK_INFORM: 3309 case WLAN_SP_MGK_ACK: 3310 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3311 goto invalid; 3312 break; 3313 } 3314 break; 3315 case WLAN_CATEGORY_MESH_ACTION: 3316 if (len < (IEEE80211_MIN_ACTION_SIZE + 3317 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3318 break; 3319 3320 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3321 break; 3322 if (mesh_action_is_path_sel(mgmt) && 3323 !mesh_path_sel_is_hwmp(sdata)) 3324 break; 3325 goto queue; 3326 } 3327 3328 return RX_CONTINUE; 3329 3330 invalid: 3331 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3332 /* will return in the next handlers */ 3333 return RX_CONTINUE; 3334 3335 handled: 3336 if (rx->sta) 3337 rx->sta->rx_stats.packets++; 3338 dev_kfree_skb(rx->skb); 3339 return RX_QUEUED; 3340 3341 queue: 3342 skb_queue_tail(&sdata->skb_queue, rx->skb); 3343 ieee80211_queue_work(&local->hw, &sdata->work); 3344 if (rx->sta) 3345 rx->sta->rx_stats.packets++; 3346 return RX_QUEUED; 3347 } 3348 3349 static ieee80211_rx_result debug_noinline 3350 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3351 { 3352 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3353 int sig = 0; 3354 3355 /* skip known-bad action frames and return them in the next handler */ 3356 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3357 return RX_CONTINUE; 3358 3359 /* 3360 * Getting here means the kernel doesn't know how to handle 3361 * it, but maybe userspace does ... 
include returned frames 3362 * so userspace can register for those to know whether ones 3363 * it transmitted were processed or returned. 3364 */ 3365 3366 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3367 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3368 sig = status->signal; 3369 3370 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 3371 rx->skb->data, rx->skb->len, 0)) { 3372 if (rx->sta) 3373 rx->sta->rx_stats.packets++; 3374 dev_kfree_skb(rx->skb); 3375 return RX_QUEUED; 3376 } 3377 3378 return RX_CONTINUE; 3379 } 3380 3381 static ieee80211_rx_result debug_noinline 3382 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 3383 { 3384 struct ieee80211_local *local = rx->local; 3385 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3386 struct sk_buff *nskb; 3387 struct ieee80211_sub_if_data *sdata = rx->sdata; 3388 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3389 3390 if (!ieee80211_is_action(mgmt->frame_control)) 3391 return RX_CONTINUE; 3392 3393 /* 3394 * For AP mode, hostapd is responsible for handling any action 3395 * frames that we didn't handle, including returning unknown 3396 * ones. For all other modes we will return them to the sender, 3397 * setting the 0x80 bit in the action category, as required by 3398 * 802.11-2012 9.24.4. 3399 * Newer versions of hostapd shall also use the management frame 3400 * registration mechanisms, but older ones still use cooked 3401 * monitor interfaces so push all frames there. 3402 */ 3403 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 3404 (sdata->vif.type == NL80211_IFTYPE_AP || 3405 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 3406 return RX_DROP_MONITOR; 3407 3408 if (is_multicast_ether_addr(mgmt->da)) 3409 return RX_DROP_MONITOR; 3410 3411 /* do not return rejected action frames */ 3412 if (mgmt->u.action.category & 0x80) 3413 return RX_DROP_UNUSABLE; 3414 3415 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3416 GFP_ATOMIC); 3417 if (nskb) { 3418 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3419 3420 nmgmt->u.action.category |= 0x80; 3421 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3422 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3423 3424 memset(nskb->cb, 0, sizeof(nskb->cb)); 3425 3426 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3427 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3428 3429 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3430 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3431 IEEE80211_TX_CTL_NO_CCK_RATE; 3432 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3433 info->hw_queue = 3434 local->hw.offchannel_tx_hw_queue; 3435 } 3436 3437 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3438 status->band, 0); 3439 } 3440 dev_kfree_skb(rx->skb); 3441 return RX_QUEUED; 3442 } 3443 3444 static ieee80211_rx_result debug_noinline 3445 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3446 { 3447 struct ieee80211_sub_if_data *sdata = rx->sdata; 3448 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3449 __le16 stype; 3450 3451 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3452 3453 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3454 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3455 sdata->vif.type != NL80211_IFTYPE_OCB && 3456 sdata->vif.type != NL80211_IFTYPE_STATION) 3457 return RX_DROP_MONITOR; 3458 3459 switch (stype) { 3460 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3461 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3462 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3463 /* process for all: mesh, mlme, 
ibss */ 3464 break; 3465 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3466 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3467 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3468 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3469 if (is_multicast_ether_addr(mgmt->da) && 3470 !is_broadcast_ether_addr(mgmt->da)) 3471 return RX_DROP_MONITOR; 3472 3473 /* process only for station */ 3474 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3475 return RX_DROP_MONITOR; 3476 break; 3477 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3478 /* process only for ibss and mesh */ 3479 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3480 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3481 return RX_DROP_MONITOR; 3482 break; 3483 default: 3484 return RX_DROP_MONITOR; 3485 } 3486 3487 /* queue up frame and kick off work to process it */ 3488 skb_queue_tail(&sdata->skb_queue, rx->skb); 3489 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3490 if (rx->sta) 3491 rx->sta->rx_stats.packets++; 3492 3493 return RX_QUEUED; 3494 } 3495 3496 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3497 struct ieee80211_rate *rate) 3498 { 3499 struct ieee80211_sub_if_data *sdata; 3500 struct ieee80211_local *local = rx->local; 3501 struct sk_buff *skb = rx->skb, *skb2; 3502 struct net_device *prev_dev = NULL; 3503 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3504 int needed_headroom; 3505 3506 /* 3507 * If cooked monitor has been processed already, then 3508 * don't do it again. If not, set the flag. 3509 */ 3510 if (rx->flags & IEEE80211_RX_CMNTR) 3511 goto out_free_skb; 3512 rx->flags |= IEEE80211_RX_CMNTR; 3513 3514 /* If there are no cooked monitor interfaces, just free the SKB */ 3515 if (!local->cooked_mntrs) 3516 goto out_free_skb; 3517 3518 /* vendor data is long removed here */ 3519 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3520 /* room for the radiotap header based on driver features */ 3521 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3522 3523 if (skb_headroom(skb) < needed_headroom && 3524 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3525 goto out_free_skb; 3526 3527 /* prepend radiotap information */ 3528 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3529 false); 3530 3531 skb_reset_mac_header(skb); 3532 skb->ip_summed = CHECKSUM_UNNECESSARY; 3533 skb->pkt_type = PACKET_OTHERHOST; 3534 skb->protocol = htons(ETH_P_802_2); 3535 3536 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3537 if (!ieee80211_sdata_running(sdata)) 3538 continue; 3539 3540 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3541 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) 3542 continue; 3543 3544 if (prev_dev) { 3545 skb2 = skb_clone(skb, GFP_ATOMIC); 3546 if (skb2) { 3547 skb2->dev = prev_dev; 3548 netif_receive_skb(skb2); 3549 } 3550 } 3551 3552 prev_dev = sdata->dev; 3553 ieee80211_rx_stats(sdata->dev, skb->len); 3554 } 3555 3556 if (prev_dev) { 3557 skb->dev = prev_dev; 3558 netif_receive_skb(skb); 3559 return; 3560 } 3561 3562 out_free_skb: 3563 dev_kfree_skb(skb); 3564 } 3565 3566 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3567 ieee80211_rx_result res) 3568 { 3569 switch (res) { 3570 case RX_DROP_MONITOR: 3571 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3572 if (rx->sta) 3573 rx->sta->rx_stats.dropped++; 3574 /* fall through */ 3575 case RX_CONTINUE: { 3576 struct ieee80211_rate *rate = NULL; 3577 struct ieee80211_supported_band *sband; 3578 struct ieee80211_rx_status *status; 3579 3580 status = 
IEEE80211_SKB_RXCB((rx->skb)); 3581 3582 sband = rx->local->hw.wiphy->bands[status->band]; 3583 if (status->encoding == RX_ENC_LEGACY) 3584 rate = &sband->bitrates[status->rate_idx]; 3585 3586 ieee80211_rx_cooked_monitor(rx, rate); 3587 break; 3588 } 3589 case RX_DROP_UNUSABLE: 3590 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3591 if (rx->sta) 3592 rx->sta->rx_stats.dropped++; 3593 dev_kfree_skb(rx->skb); 3594 break; 3595 case RX_QUEUED: 3596 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3597 break; 3598 } 3599 } 3600 3601 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3602 struct sk_buff_head *frames) 3603 { 3604 ieee80211_rx_result res = RX_DROP_MONITOR; 3605 struct sk_buff *skb; 3606 3607 #define CALL_RXH(rxh) \ 3608 do { \ 3609 res = rxh(rx); \ 3610 if (res != RX_CONTINUE) \ 3611 goto rxh_next; \ 3612 } while (0) 3613 3614 /* Lock here to avoid hitting all of the data used in the RX 3615 * path (e.g. key data, station data, ...) concurrently when 3616 * a frame is released from the reorder buffer due to timeout 3617 * from the timer, potentially concurrently with RX from the 3618 * driver. 3619 */ 3620 spin_lock_bh(&rx->local->rx_path_lock); 3621 3622 while ((skb = __skb_dequeue(frames))) { 3623 /* 3624 * all the other fields are valid across frames 3625 * that belong to an aMPDU since they are on the 3626 * same TID from the same station 3627 */ 3628 rx->skb = skb; 3629 3630 CALL_RXH(ieee80211_rx_h_check_more_data); 3631 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 3632 CALL_RXH(ieee80211_rx_h_sta_process); 3633 CALL_RXH(ieee80211_rx_h_decrypt); 3634 CALL_RXH(ieee80211_rx_h_defragment); 3635 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 3636 /* must be after MMIC verify so header is counted in MPDU mic */ 3637 #ifdef CONFIG_MAC80211_MESH 3638 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3639 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3640 #endif 3641 CALL_RXH(ieee80211_rx_h_amsdu); 3642 CALL_RXH(ieee80211_rx_h_data); 3643 3644 /* special treatment -- needs the queue */ 3645 res = ieee80211_rx_h_ctrl(rx, frames); 3646 if (res != RX_CONTINUE) 3647 goto rxh_next; 3648 3649 CALL_RXH(ieee80211_rx_h_mgmt_check); 3650 CALL_RXH(ieee80211_rx_h_action); 3651 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 3652 CALL_RXH(ieee80211_rx_h_action_return); 3653 CALL_RXH(ieee80211_rx_h_mgmt); 3654 3655 rxh_next: 3656 ieee80211_rx_handlers_result(rx, res); 3657 3658 #undef CALL_RXH 3659 } 3660 3661 spin_unlock_bh(&rx->local->rx_path_lock); 3662 } 3663 3664 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3665 { 3666 struct sk_buff_head reorder_release; 3667 ieee80211_rx_result res = RX_DROP_MONITOR; 3668 3669 __skb_queue_head_init(&reorder_release); 3670 3671 #define CALL_RXH(rxh) \ 3672 do { \ 3673 res = rxh(rx); \ 3674 if (res != RX_CONTINUE) \ 3675 goto rxh_next; \ 3676 } while (0) 3677 3678 CALL_RXH(ieee80211_rx_h_check_dup); 3679 CALL_RXH(ieee80211_rx_h_check); 3680 3681 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3682 3683 ieee80211_rx_handlers(rx, &reorder_release); 3684 return; 3685 3686 rxh_next: 3687 ieee80211_rx_handlers_result(rx, res); 3688 3689 #undef CALL_RXH 3690 } 3691 3692 /* 3693 * This function makes calls into the RX path, therefore 3694 * it has to be invoked under RCU read lock. 
static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
				  struct sk_buff_head *frames)
{
	ieee80211_rx_result res = RX_DROP_MONITOR;
	struct sk_buff *skb;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(rx);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* Lock here to avoid hitting all of the data used in the RX
	 * path (e.g. key data, station data, ...) concurrently when
	 * a frame is released from the reorder buffer due to timeout
	 * from the timer, potentially concurrently with RX from the
	 * driver.
	 */
	spin_lock_bh(&rx->local->rx_path_lock);

	while ((skb = __skb_dequeue(frames))) {
		/*
		 * all the other fields are valid across frames
		 * that belong to an aMPDU since they are on the
		 * same TID from the same station
		 */
		rx->skb = skb;

		CALL_RXH(ieee80211_rx_h_check_more_data);
		CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
		CALL_RXH(ieee80211_rx_h_sta_process);
		CALL_RXH(ieee80211_rx_h_decrypt);
		CALL_RXH(ieee80211_rx_h_defragment);
		CALL_RXH(ieee80211_rx_h_michael_mic_verify);
		/* must be after MMIC verify so header is counted in MPDU mic */
#ifdef CONFIG_MAC80211_MESH
		if (ieee80211_vif_is_mesh(&rx->sdata->vif))
			CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
		CALL_RXH(ieee80211_rx_h_amsdu);
		CALL_RXH(ieee80211_rx_h_data);

		/* special treatment -- needs the queue */
		res = ieee80211_rx_h_ctrl(rx, frames);
		if (res != RX_CONTINUE)
			goto rxh_next;

		CALL_RXH(ieee80211_rx_h_mgmt_check);
		CALL_RXH(ieee80211_rx_h_action);
		CALL_RXH(ieee80211_rx_h_userspace_mgmt);
		CALL_RXH(ieee80211_rx_h_action_return);
		CALL_RXH(ieee80211_rx_h_mgmt);

 rxh_next:
		ieee80211_rx_handlers_result(rx, res);

#undef CALL_RXH
	}

	spin_unlock_bh(&rx->local->rx_path_lock);
}

static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
{
	struct sk_buff_head reorder_release;
	ieee80211_rx_result res = RX_DROP_MONITOR;

	__skb_queue_head_init(&reorder_release);

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(rx);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	CALL_RXH(ieee80211_rx_h_check_dup);
	CALL_RXH(ieee80211_rx_h_check);

	ieee80211_rx_reorder_ampdu(rx, &reorder_release);

	ieee80211_rx_handlers(rx, &reorder_release);
	return;

 rxh_next:
	ieee80211_rx_handlers_result(rx, res);

#undef CALL_RXH
}

/*
 * This function makes calls into the RX path, therefore
 * it has to be invoked under RCU read lock.
 */
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
	struct sk_buff_head frames;
	struct ieee80211_rx_data rx = {
		.sta = sta,
		.sdata = sta->sdata,
		.local = sta->local,
		/* This is OK -- must be QoS data frame */
		.security_idx = tid,
		.seqno_idx = tid,
		.napi = NULL, /* must be NULL to not have races */
	};
	struct tid_ampdu_rx *tid_agg_rx;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		return;

	__skb_queue_head_init(&frames);

	spin_lock(&tid_agg_rx->reorder_lock);
	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
	spin_unlock(&tid_agg_rx->reorder_lock);

	if (!skb_queue_empty(&frames)) {
		struct ieee80211_event event = {
			.type = BA_FRAME_TIMEOUT,
			.u.ba.tid = tid,
			.u.ba.sta = &sta->sta,
		};
		drv_event_callback(rx.local, rx.sdata, &event);
	}

	ieee80211_rx_handlers(&rx, &frames);
}

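/*
 * Exported helper for drivers: move the RX BA window for @tid and mark the
 * MPDUs indicated in @filtered as filtered in the reorder buffer bitmap, so
 * the reorder logic does not wait for them; any frames that become
 * releasable as a result are run through the RX handlers.
 */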
void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
					  u16 ssn, u64 filtered,
					  u16 received_mpdus)
{
	struct sta_info *sta;
	struct tid_ampdu_rx *tid_agg_rx;
	struct sk_buff_head frames;
	struct ieee80211_rx_data rx = {
		/* This is OK -- must be QoS data frame */
		.security_idx = tid,
		.seqno_idx = tid,
	};
	int i, diff;

	if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
		return;

	__skb_queue_head_init(&frames);

	sta = container_of(pubsta, struct sta_info, sta);

	rx.sta = sta;
	rx.sdata = sta->sdata;
	rx.local = sta->local;

	rcu_read_lock();
	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto out;

	spin_lock_bh(&tid_agg_rx->reorder_lock);

	if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
		int release;

		/* release all frames in the reorder buffer */
		release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
			   IEEE80211_SN_MODULO;
		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
						 release, &frames);
		/* update ssn to match received ssn */
		tid_agg_rx->head_seq_num = ssn;
	} else {
		ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
						 &frames);
	}

	/* Handle the case where the received SSN is behind the SSN we are
	 * tracking (head_seq_num); it can be up to tid_agg_rx->buf_size
	 * behind and still be valid.
	 */
	diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
	if (diff >= tid_agg_rx->buf_size) {
		tid_agg_rx->reorder_buf_filtered = 0;
		goto release;
	}
	filtered = filtered >> diff;
	ssn += diff;

	/* update bitmap */
	for (i = 0; i < tid_agg_rx->buf_size; i++) {
		int index = (ssn + i) % tid_agg_rx->buf_size;

		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
		if (filtered & BIT_ULL(i))
			tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
	}

	/* now process also frames that the filter marking released */
	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);

 release:
	spin_unlock_bh(&tid_agg_rx->reorder_lock);

	ieee80211_rx_handlers(&rx, &frames);

 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);

/* main receive path */

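/*
 * Decide whether this frame should be processed on this interface at all,
 * based on the interface type and the addresses in the 802.11 header.
 * Returning false makes ieee80211_prepare_and_rx_handle() skip the
 * interface without running the RX handlers.
 */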
static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
	bool multicast = is_multicast_ether_addr(hdr->addr1);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		if (!bssid && !sdata->u.mgd.use_4addr)
			return false;
		if (multicast)
			return true;
		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
	case NL80211_IFTYPE_ADHOC:
		if (!bssid)
			return false;
		if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
		    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
			return false;
		if (ieee80211_is_beacon(hdr->frame_control))
			return true;
		if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
			return false;
		if (!multicast &&
		    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
			return false;
		if (!rx->sta) {
			int rate_idx;
			if (status->encoding != RX_ENC_LEGACY)
				rate_idx = 0; /* TODO: HT/VHT rates */
			else
				rate_idx = status->rate_idx;
			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
						 BIT(rate_idx));
		}
		return true;
	case NL80211_IFTYPE_OCB:
		if (!bssid)
			return false;
		if (!ieee80211_is_data_present(hdr->frame_control))
			return false;
		if (!is_broadcast_ether_addr(bssid))
			return false;
		if (!multicast &&
		    !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
			return false;
		if (!rx->sta) {
			int rate_idx;
			if (status->encoding != RX_ENC_LEGACY)
				rate_idx = 0; /* TODO: HT rates */
			else
				rate_idx = status->rate_idx;
			ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
						BIT(rate_idx));
		}
		return true;
	case NL80211_IFTYPE_MESH_POINT:
		if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
			return false;
		if (multicast)
			return true;
		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		if (!bssid)
			return ether_addr_equal(sdata->vif.addr, hdr->addr1);

		if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
			/*
			 * Accept public action frames even when the
			 * BSSID doesn't match, this is used for P2P
			 * and location updates. Note that mac80211
			 * itself never looks at these frames.
			 */
			if (!multicast &&
			    !ether_addr_equal(sdata->vif.addr, hdr->addr1))
				return false;
			if (ieee80211_is_public_action(hdr, skb->len))
				return true;
			return ieee80211_is_beacon(hdr->frame_control);
		}

		if (!ieee80211_has_tods(hdr->frame_control)) {
			/* ignore data frames to TDLS-peers */
			if (ieee80211_is_data(hdr->frame_control))
				return false;
			/* ignore action frames to TDLS-peers */
			if (ieee80211_is_action(hdr->frame_control) &&
			    !is_broadcast_ether_addr(bssid) &&
			    !ether_addr_equal(bssid, hdr->addr1))
				return false;
		}

		/*
		 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
		 * the BSSID - we've checked that already but may have accepted
		 * the wildcard (ff:ff:ff:ff:ff:ff).
		 *
		 * It also says:
		 *	The BSSID of the Data frame is determined as follows:
		 *	a) If the STA is contained within an AP or is associated
		 *	   with an AP, the BSSID is the address currently in use
		 *	   by the STA contained in the AP.
		 *
		 * So we should not accept data frames with an address that's
		 * multicast.
		 *
		 * Accepting it also opens a security problem because stations
		 * could encrypt it with the GTK and inject traffic that way.
		 */
		if (ieee80211_is_data(hdr->frame_control) && multicast)
			return false;

		return true;
	case NL80211_IFTYPE_WDS:
		if (bssid || !ieee80211_is_data(hdr->frame_control))
			return false;
		return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
	case NL80211_IFTYPE_P2P_DEVICE:
		return ieee80211_is_public_action(hdr, skb->len) ||
		       ieee80211_is_probe_req(hdr->frame_control) ||
		       ieee80211_is_probe_resp(hdr->frame_control) ||
		       ieee80211_is_beacon(hdr->frame_control);
	case NL80211_IFTYPE_NAN:
		/* Currently no frames on NAN interface are allowed */
		return false;
	default:
		break;
	}

	WARN_ON_ONCE(1);
	return false;
}

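/*
 * (Re)evaluate whether the fast RX path may be used for this station and,
 * if so, precompute the ieee80211_fast_rx descriptor (header offsets,
 * expected DS bits, key info) that ieee80211_invoke_fast_rx() consumes.
 * Configurations the fast path cannot handle (A-MPDU RX without a reorder
 * offload, software powersave, TKIP or other unsupported ciphers,
 * unauthorized stations, other interface types) clear the descriptor
 * instead.
 */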
void ieee80211_check_fast_rx(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_key *key;
	struct ieee80211_fast_rx fastrx = {
		.dev = sdata->dev,
		.vif_type = sdata->vif.type,
		.control_port_protocol = sdata->control_port_protocol,
	}, *old, *new = NULL;
	bool assign = false;

	/* use sparse to check that we don't return without updating */
	__acquire(check_fast_rx);

	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
	BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
	ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
	ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);

	fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);

	/* fast-rx doesn't do reordering */
	if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
	    !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
		goto clear;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		if (sta->sta.tdls) {
			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
			fastrx.expected_ds_bits = 0;
		} else {
			fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
			fastrx.expected_ds_bits =
				cpu_to_le16(IEEE80211_FCTL_FROMDS);
		}

		if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
			fastrx.expected_ds_bits |=
				cpu_to_le16(IEEE80211_FCTL_TODS);
			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
		}

		if (!sdata->u.mgd.powersave)
			break;

		/* software powersave is a huge mess, avoid all of it */
		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
			goto clear;
		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
			goto clear;
		break;
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_AP:
		/* parallel-rx requires this, at least with calls to
		 * ieee80211_sta_ps_transition()
		 */
		if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
			goto clear;
		fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
		fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
		fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);

		fastrx.internal_forward =
			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
			 !sdata->u.vlan.sta);

		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		    sdata->u.vlan.sta) {
			fastrx.expected_ds_bits |=
				cpu_to_le16(IEEE80211_FCTL_FROMDS);
			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
			fastrx.internal_forward = 0;
		}

		break;
	default:
		goto clear;
	}

	if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		goto clear;

	rcu_read_lock();
	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
	if (key) {
		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_TKIP:
			/* we don't want to deal with MMIC in fast-rx */
			goto clear_rcu;
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			break;
		default:
			/* we also don't want to deal with WEP or cipher scheme
			 * since those require looking up the key idx in the
			 * frame, rather than assuming the PTK is used
			 * (we need to revisit this once we implement the real
			 * PTK index, which is now valid in the spec, but we
			 * haven't implemented that part yet)
			 */
			goto clear_rcu;
		}

		fastrx.key = true;
		fastrx.icv_len = key->conf.icv_len;
	}

	assign = true;
 clear_rcu:
	rcu_read_unlock();
 clear:
	__release(check_fast_rx);

	if (assign)
		new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);

	spin_lock_bh(&sta->lock);
	old = rcu_dereference_protected(sta->fast_rx, true);
	rcu_assign_pointer(sta->fast_rx, new);
	spin_unlock_bh(&sta->lock);

	if (old)
		kfree_rcu(old, rcu_head);
}

void ieee80211_clear_fast_rx(struct sta_info *sta)
{
	struct ieee80211_fast_rx *old;

	spin_lock_bh(&sta->lock);
	old = rcu_dereference_protected(sta->fast_rx, true);
	RCU_INIT_POINTER(sta->fast_rx, NULL);
	spin_unlock_bh(&sta->lock);

	if (old)
		kfree_rcu(old, rcu_head);
}

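/* Re-evaluate fast RX for every station that belongs to this interface or
 * shares its BSS (as AP_VLAN interfaces do); the caller must hold sta_mtx.
 */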
void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	lockdep_assert_held(&local->sta_mtx);

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata &&
		    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
			continue;
		ieee80211_check_fast_rx(sta);
	}
}

void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;

	mutex_lock(&local->sta_mtx);
	__ieee80211_check_fast_rx_iface(sdata);
	mutex_unlock(&local->sta_mtx);
}

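/*
 * Fast data RX path.  Re-checks the conditions precomputed in
 * ieee80211_check_fast_rx() against this frame (duplicate/PN validation
 * flags, addressing, DS bits, SNAP header); A-MSDUs are handed to the
 * A-MSDU handler, other frames have their 802.11 header converted to an
 * ethernet header in place and are delivered directly, bypassing the
 * regular RX handler chain.  Returns false to fall back to the slow path;
 * once the frame has been modified it is always consumed here.
 */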
static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
				     struct ieee80211_fast_rx *fast_rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct sta_info *sta = rx->sta;
	int orig_len = skb->len;
	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
	int snap_offs = hdrlen;
	struct {
		u8 snap[sizeof(rfc1042_header)];
		__be16 proto;
	} *payload __aligned(2);
	struct {
		u8 da[ETH_ALEN];
		u8 sa[ETH_ALEN];
	} addrs __aligned(2);
	struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;

	if (fast_rx->uses_rss)
		stats = this_cpu_ptr(sta->pcpu_rx_stats);

	/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
	 * to a common data structure; drivers can implement that per queue
	 * but we don't have that information in mac80211
	 */
	if (!(status->flag & RX_FLAG_DUP_VALIDATED))
		return false;

#define FAST_RX_CRYPT_FLAGS	(RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)

	/* If using encryption, we also need to have:
	 *  - PN_VALIDATED: similar, but the implementation is tricky
	 *  - DECRYPTED: necessary for PN_VALIDATED
	 */
	if (fast_rx->key &&
	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	if (unlikely(ieee80211_is_frag(hdr)))
		return false;

	/* Since our interface address cannot be multicast, this
	 * implicitly also rejects multicast frames without the
	 * explicit check.
	 *
	 * We shouldn't get any *data* frames not addressed to us
	 * (AP mode will accept multicast *management* frames), but
	 * punting here will make it go through the full checks in
	 * ieee80211_accept_frame().
	 */
	if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
		return false;

	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
					      IEEE80211_FCTL_TODS)) !=
	    fast_rx->expected_ds_bits)
		return false;

	/* assign the key to drop unencrypted frames (later)
	 * and strip the IV/MIC if necessary
	 */
	if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
		/* GCMP header length is the same */
		snap_offs += IEEE80211_CCMP_HDR_LEN;
	}

	if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
		if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
			goto drop;

		payload = (void *)(skb->data + snap_offs);

		if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
			return false;

		/* Don't handle these here since they require special code.
		 * Accept AARP and IPX even though they should come with a
		 * bridge-tunnel header - but if we get them this way then
		 * there's little point in discarding them.
		 */
		if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
			     payload->proto == fast_rx->control_port_protocol))
			return false;
	}

	/* after this point, don't punt to the slowpath! */

	if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
	    pskb_trim(skb, skb->len - fast_rx->icv_len))
		goto drop;

	if (unlikely(fast_rx->sta_notify)) {
		ieee80211_sta_rx_notify(rx->sdata, hdr);
		fast_rx->sta_notify = false;
	}

	/* statistics part of ieee80211_rx_h_sta_process() */
	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		stats->last_signal = status->signal;
		if (!fast_rx->uses_rss)
			ewma_signal_add(&sta->rx_stats_avg.signal,
					-status->signal);
	}

	if (status->chains) {
		int i;

		stats->chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			stats->chain_signal_last[i] = signal;
			if (!fast_rx->uses_rss)
				ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
						-signal);
		}
	}
	/* end of statistics */

	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
		goto drop;

	if (status->rx_flags & IEEE80211_RX_AMSDU) {
		if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
		    RX_QUEUED)
			goto drop;

		return true;
	}

	stats->last_rx = jiffies;
	stats->last_rate = sta_stats_encode_rate(status);

	stats->fragments++;
	stats->packets++;

	/* do the header conversion - first grab the addresses */
	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
	/* remove the SNAP but leave the ethertype */
	skb_pull(skb, snap_offs + sizeof(rfc1042_header));
	/* push the addresses in front */
	memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));

	skb->dev = fast_rx->dev;

	ieee80211_rx_stats(fast_rx->dev, skb->len);

	/* The seqno index has the same property as needed
	 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
	 * for non-QoS-data frames. Here we know it's a data
	 * frame, so count MSDUs.
	 */
	u64_stats_update_begin(&stats->syncp);
	stats->msdu[rx->seqno_idx]++;
	stats->bytes += orig_len;
	u64_stats_update_end(&stats->syncp);

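	/* AP/AP_VLAN bridging: if the destination is another station we know
	 * on this interface, send the frame back out on the wireless medium
	 * instead of (for multicast, in addition to) delivering it locally.
	 */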
	if (fast_rx->internal_forward) {
		struct sk_buff *xmit_skb = NULL;
		if (is_multicast_ether_addr(addrs.da)) {
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
		} else if (!ether_addr_equal(addrs.da, addrs.sa) &&
			   sta_info_get(rx->sdata, addrs.da)) {
			xmit_skb = skb;
			skb = NULL;
		}

		if (xmit_skb) {
			/*
			 * Send to wireless media and increase priority by 256
			 * to keep the received priority instead of
			 * reclassifying the frame (see cfg80211_classify8021d).
			 */
			xmit_skb->priority += 256;
			xmit_skb->protocol = htons(ETH_P_802_3);
			skb_reset_network_header(xmit_skb);
			skb_reset_mac_header(xmit_skb);
			dev_queue_xmit(xmit_skb);
		}

		if (!skb)
			return true;
	}

	/* deliver to local stack */
	skb->protocol = eth_type_trans(skb, fast_rx->dev);
	memset(skb->cb, 0, sizeof(skb->cb));
	if (rx->napi)
		napi_gro_receive(rx->napi, skb);
	else
		netif_receive_skb(skb);

	return true;
 drop:
	dev_kfree_skb(skb);
	stats->dropped++;
	return true;
}

/*
 * This function returns whether the SKB was destined for RX processing;
 * when consume is true, that is equivalent to whether the skb was
 * consumed.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
					    struct sk_buff *skb, bool consume)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_sub_if_data *sdata = rx->sdata;

	rx->skb = skb;

	/* See if we can do fast-rx; if we have to copy we already lost,
	 * so punt in that case. We should never have to deliver a data
	 * frame to multiple interfaces anyway.
	 *
	 * We skip the ieee80211_accept_frame() call and do the necessary
	 * checking inside ieee80211_invoke_fast_rx().
	 */
	if (consume && rx->sta) {
		struct ieee80211_fast_rx *fast_rx;

		fast_rx = rcu_dereference(rx->sta->fast_rx);
		if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
			return true;
	}

	if (!ieee80211_accept_frame(rx))
		return false;

	if (!consume) {
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				wiphy_debug(local->hw.wiphy,
					    "failed to copy skb for %s\n",
					    sdata->name);
			return true;
		}

		rx->skb = skb;
	}

	ieee80211_invoke_rx_handlers(rx);
	return true;
}

/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it must
 * be called with rcu_read_lock protection.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct ieee80211_sta *pubsta,
					 struct sk_buff *skb,
					 struct napi_struct *napi)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_rx_data rx;
	struct ieee80211_sub_if_data *prev;
	struct rhlist_head *tmp;
	int err = 0;

	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;
	rx.napi = napi;

	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);

	if (ieee80211_is_mgmt(fc)) {
		/* drop frame if too short for header */
		if (skb->len < ieee80211_hdrlen(fc))
			err = -ENOBUFS;
		else
			err = skb_linearize(skb);
	} else {
		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
	}

	if (err) {
		dev_kfree_skb(skb);
		return;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	ieee80211_parse_qos(&rx);
	ieee80211_verify_alignment(&rx);

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
		     ieee80211_is_beacon(hdr->frame_control)))
		ieee80211_scan_rx(local, skb);

	if (ieee80211_is_data(fc)) {
		struct sta_info *sta, *prev_sta;

		if (pubsta) {
			rx.sta = container_of(pubsta, struct sta_info, sta);
			rx.sdata = rx.sta->sdata;
			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
				return;
			goto out;
		}

		prev_sta = NULL;

		for_each_sta_info(local, hdr->addr2, sta, tmp) {
			if (!prev_sta) {
				prev_sta = sta;
				continue;
			}

			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;
			ieee80211_prepare_and_rx_handle(&rx, skb, false);

			prev_sta = sta;
		}

		if (prev_sta) {
			rx.sta = prev_sta;
			rx.sdata = prev_sta->sdata;

			if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
				return;
			goto out;
		}
	}

	prev = NULL;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
			continue;

		/*
		 * The frame is destined for this interface, but delivery is
		 * done one interface late: the previous interface gets a
		 * copy here, and the last one consumes the original SKB
		 * after the loop, so we never copy more often than needed.
		 */

		if (!prev) {
			prev = sdata;
			continue;
		}

		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;
		ieee80211_prepare_and_rx_handle(&rx, skb, false);

		prev = sdata;
	}

	if (prev) {
		rx.sta = sta_info_get_bss(prev, hdr->addr2);
		rx.sdata = prev;

		if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
			return;
	}

 out:
	dev_kfree_skb(skb);
}

/*
 * This is the receive path handler. It is called by a low level driver when an
 * 802.11 MPDU is received from the hardware.
 */
void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
		       struct sk_buff *skb, struct napi_struct *napi)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	WARN_ON_ONCE(softirq_count() == 0);

	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
		goto drop;

	sband = local->hw.wiphy->bands[status->band];
	if (WARN_ON(!sband))
		goto drop;

	/*
	 * If we're suspending, it is possible although not too likely
	 * that we'd be receiving frames after having already partially
	 * quiesced the stack. We can't process such frames then since
	 * that might, for example, cause stations to be added or other
	 * driver callbacks be invoked.
	 */
	if (unlikely(local->quiescing || local->suspended))
		goto drop;

	/* We might be during a HW reconfig, prevent Rx for the same reason */
	if (unlikely(local->in_reconfig))
		goto drop;

	/*
	 * The same happens when we're not even started,
	 * but that's worth a warning.
	 */
	if (WARN_ON(!local->started))
		goto drop;

	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
		/*
		 * Validate the rate, unless a PLCP error means that
		 * we probably can't have a valid rate here anyway.
		 */

		switch (status->encoding) {
		case RX_ENC_HT:
			/*
			 * rate_idx is MCS index, which can be [0-76]
			 * as documented on:
			 *
			 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
			 *
			 * Anything else would be some sort of driver or
			 * hardware error. The driver should catch hardware
			 * errors.
			 */
			if (WARN(status->rate_idx > 76,
				 "Rate marked as an HT rate but passed status->rate_idx is not an MCS index [0-76]: %d (0x%02x)\n",
				 status->rate_idx,
				 status->rate_idx))
				goto drop;
			break;
		case RX_ENC_VHT:
			if (WARN_ONCE(status->rate_idx > 9 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		case RX_ENC_HE:
			if (WARN_ONCE(status->rate_idx > 11 ||
				      !status->nss ||
				      status->nss > 8,
				      "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
				      status->rate_idx, status->nss))
				goto drop;
			break;
		default:
			WARN_ON_ONCE(1);
			/* fall through */
		case RX_ENC_LEGACY:
			if (WARN_ON(status->rate_idx >= sband->n_bitrates))
				goto drop;
			rate = &sband->bitrates[status->rate_idx];
		}
	}

	status->rx_flags = 0;

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();

	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
	skb = ieee80211_rx_monitor(local, skb, rate);
	if (!skb) {
		rcu_read_unlock();
		return;
	}

	ieee80211_tpt_led_trig_rx(local,
			((struct ieee80211_hdr *)skb->data)->frame_control,
			skb->len);

	__ieee80211_rx_handle_packet(hw, pubsta, skb, napi);

	rcu_read_unlock();

	return;
 drop:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx_napi);

/* This is a version of the rx handler that can be called from hard irq
 * context. Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_local *local = hw_to_local(hw);

	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);