/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			       enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
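 * In practice that means the FCS at the tail (when the driver reported
 * one) and any radiotap/vendor data space in front of the 802.11 header.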
 */
static void remove_monitor_info(struct sk_buff *skb,
				unsigned int present_fcs_len,
				unsigned int rtap_space)
{
	if (present_fcs_len)
		__pskb_trim(skb, skb->len - present_fcs_len);
	__pskb_pull(skb, rtap_space);
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR |
			    RX_FLAG_NO_PSDU))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);
	/* vendor presence bitmap */
	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
		len += 4;

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->encoding == RX_ENC_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->encoding == RX_ENC_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
	}

	if (status->flag & RX_FLAG_NO_PSDU)
		len += 1;

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		len = ALIGN(len, 2);
		len += 4;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap;
		int vendor_data_offset = 0;

		/*
		 * The position to look at depends on the existence (or non-
		 * existence) of other elements, so take that into account...
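		 * (The HE, HE-MU and L-SIG radiotap structs, when present,
		 * sit in front of the vendor TLV in skb->data.)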
		 */
		if (status->flag & RX_FLAG_RADIOTAP_HE)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he);
		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_he_mu);
		if (status->flag & RX_FLAG_RADIOTAP_LSIG)
			vendor_data_offset +=
				sizeof(struct ieee80211_radiotap_lsig);

		rtap = (void *)&skb->data[vendor_data_offset];

		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb,
					 int rtap_space)
{
	struct {
		struct ieee80211_hdr_3addr hdr;
		u8 category;
		u8 action_code;
	} __packed __aligned(2) action;

	if (!sdata)
		return;

	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);

	if (skb->len < rtap_space + sizeof(action) +
		       VHT_MUMIMO_GROUPS_DATA_LEN)
		return;

	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
		return;

	skb_copy_bits(skb, rtap_space, &action, sizeof(action));

	if (!ieee80211_is_action(action.hdr.frame_control))
		return;

	if (action.category != WLAN_CATEGORY_VHT)
		return;

	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
		return;

	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
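 *
 * The caller must have reserved at least @rtap_len bytes of headroom,
 * as computed by ieee80211_rx_radiotap_hdrlen().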
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without rate information, don't add it. If we have it,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->bw == RATE_INFO_BW_10)
			shift = 1;
		else if (status->bw == RATE_INFO_BW_5)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->bw == RATE_INFO_BW_10)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->bw == RATE_INFO_BW_5)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->encoding != RX_ENC_LEGACY)
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->encoding == RX_ENC_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->bw == RATE_INFO_BW_40)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |=
				IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->encoding == RX_ENC_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->enc_flags & RX_ENC_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		switch (status->bw) {
		case RATE_INFO_BW_80:
			*pos++ = 4;
			break;
		case RATE_INFO_BW_160:
			*pos++ = 11;
			break;
		case RATE_INFO_BW_40:
			*pos++ = 1;
			break;
		default:
			*pos++ = 0;
		}
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->nss;
		pos += 4;
		/* coding field */
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;

		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		put_unaligned_le64(status->device_timestamp, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)

		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
			he.data6 |= HE_PREP(DATA6_NSTS,
					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
						      status->enc_flags));
			he.data3 |= HE_PREP(DATA3_STBC, 1);
		} else {
			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
		}

#define CHECK_GI(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
		     (int)NL80211_RATE_INFO_HE_GI_##s)

		CHECK_GI(0_8);
		CHECK_GI(1_6);
		CHECK_GI(3_2);

		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
		he.data3 |= HE_PREP(DATA3_CODING,
				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));

		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);

		switch (status->bw) {
		case RATE_INFO_BW_20:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
			break;
		case RATE_INFO_BW_40:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
			break;
		case RATE_INFO_BW_80:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
			break;
		case RATE_INFO_BW_160:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
			break;
		case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)

			CHECK_RU_ALLOC(26);
			CHECK_RU_ALLOC(52);
			CHECK_RU_ALLOC(106);
			CHECK_RU_ALLOC(242);
			CHECK_RU_ALLOC(484);
			CHECK_RU_ALLOC(996);
			CHECK_RU_ALLOC(2x996);

			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    status->he_ru + 4);
			break;
		default:
			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
		memcpy(pos, &he, sizeof(he));
		pos += sizeof(he);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
		memcpy(pos, &he_mu, sizeof(he_mu));
		pos += sizeof(he_mu);
	}

	if (status->flag & RX_FLAG_NO_PSDU) {
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
		*pos++ = status->zero_length_psdu_type;
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
		memcpy(pos, &lsig, sizeof(lsig));
		pos += sizeof(lsig);
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

static struct sk_buff *
ieee80211_make_monitor_skb(struct ieee80211_local *local,
			   struct sk_buff **origskb,
			   struct ieee80211_rate *rate,
			   int rtap_space, bool use_origskb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb;

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
	needed_headroom = rt_hdrlen - rtap_space;

	if (use_origskb) {
		/* only need to expand headroom if necessary */
		skb = *origskb;
		*origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
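		 * The copy is expanded by needed_headroom bytes so that the
		 * radiotap header built below fits in front of the frame.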
		 */
		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);

		if (!skb)
			return NULL;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	return skb;
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *monskb = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_space = 0;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);
	bool only_monitor = false;
	unsigned int min_head_len;

	if (status->flag & RX_FLAG_RADIOTAP_HE)
		rtap_space += sizeof(struct ieee80211_radiotap_he);

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);

	if (status->flag & RX_FLAG_RADIOTAP_LSIG)
		rtap_space += sizeof(struct ieee80211_radiotap_lsig);

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap =
			(void *)(origskb->data + rtap_space);

		rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
	}

	min_head_len = rtap_space;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	if (!(status->flag & RX_FLAG_NO_PSDU)) {
		if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
			if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
				/* driver bug */
				WARN_ON(1);
				dev_kfree_skb(origskb);
				return NULL;
			}
			present_fcs_len = FCS_LEN;
		}

		/* also consider the hdr->frame_control */
		min_head_len += 2;
	}

	/* ensure that the expected data elements are in skb head */
	if (!pskb_may_pull(origskb, min_head_len)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (only_monitor) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		remove_monitor_info(origskb, present_fcs_len, rtap_space);
		return origskb;
	}

	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);

	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
		bool last_monitor = list_is_last(&sdata->u.mntr.list,
						 &local->mon_list);

		if (!monskb)
			monskb = ieee80211_make_monitor_skb(local, &origskb,
							    rate, rtap_space,
							    only_monitor &&
							    last_monitor);

		if (monskb) {
			struct sk_buff *skb;

			if (last_monitor) {
				skb = monskb;
				monskb = NULL;
			} else {
				skb = skb_clone(monskb, GFP_ATOMIC);
			}

			if (skb) {
				skb->dev = sdata->dev;
				ieee80211_rx_stats(skb->dev, skb->len);
				netif_receive_skb(skb);
			}
		}

		if (last_monitor)
			break;
	}

	/* this happens if last_monitor was erroneously false */
	dev_kfree_skb(monskb);

	/* ditto */
	if (!origskb)
		return NULL;

	remove_monitor_info(origskb, present_fcs_len, rtap_space);
	return origskb;
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 *	Sequence numbers for management frames, QoS data
		 *	frames with a broadcast/multicast address in the
		 *	Address 1 field, and all non-QoS data frames sent
		 *	by QoS STAs are assigned using an additional single
		 *	modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
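 * (In other words, the 802.11 header must start at an even address;
 * ieee80211_verify_alignment() below checks this when
 * CONFIG_MAC80211_VERBOSE_DEBUG is enabled.)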
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like that which Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

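	/* release all frames up to, but not including, the new head_seq_num */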
	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
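 * ("Consumed" covers both frames buffered for later release and frames
 * freed as duplicates or because they are older than the window.)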
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Frames that are
 * not consumed by the reordering logic are added to the @frames queue
 * for immediate processing.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = ieee80211_get_tid(hdr);

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx) {
		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);
		goto dont_reorder;
	}

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		if (txq_has_queue(sta->sta.txq[tid]))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else
		set_sta_flag(sta, WLAN_STA_PSPOLL);
}
EXPORT_SYMBOL(ieee80211_sta_pspoll);

void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	int ac = ieee80211_ac_from_tid(tid);

	/*
	 * If this AC is not trigger-enabled do nothing unless the
	 * driver is calling us after it already checked.
	 *
	 * NB: This could/should check a separate bitmap of trigger-
	 * enabled queues, but for now we only implement uAPSD w/o
	 * TSPEC changes to the ACs, so they're always the same.
	 */
	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
	    tid != IEEE80211_NUM_TIDS)
		return;

	/* if we are in a service period, do nothing */
	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_uapsd(sta);
	else
		set_sta_flag(sta, WLAN_STA_UAPSD);
}
EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control))
			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_stats.fragments++;

	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
	sta->rx_stats.bytes += rx->skb->len;
	u64_stats_update_end(&rx->sta->rx_stats.syncp);

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
					-signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence, and only for a data or management
	 * frame as specified in IEEE 802.11-2016 11.2.3.2
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1) &&
	    (ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_data(hdr->frame_control)) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
1839 */ 1840 sta->rx_stats.packets++; 1841 dev_kfree_skb(rx->skb); 1842 return RX_QUEUED; 1843 } 1844 1845 return RX_CONTINUE; 1846 } /* ieee80211_rx_h_sta_process */ 1847 1848 static ieee80211_rx_result debug_noinline 1849 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 1850 { 1851 struct sk_buff *skb = rx->skb; 1852 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1853 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1854 int keyidx; 1855 int hdrlen; 1856 ieee80211_rx_result result = RX_DROP_UNUSABLE; 1857 struct ieee80211_key *sta_ptk = NULL; 1858 int mmie_keyidx = -1; 1859 __le16 fc; 1860 const struct ieee80211_cipher_scheme *cs = NULL; 1861 1862 /* 1863 * Key selection 101 1864 * 1865 * There are four types of keys: 1866 * - GTK (group keys) 1867 * - IGTK (group keys for management frames) 1868 * - PTK (pairwise keys) 1869 * - STK (station-to-station pairwise keys) 1870 * 1871 * When selecting a key, we have to distinguish between multicast 1872 * (including broadcast) and unicast frames, the latter can only 1873 * use PTKs and STKs while the former always use GTKs and IGTKs. 1874 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then 1875 * unicast frames can also use key indices like GTKs. Hence, if we 1876 * don't have a PTK/STK we check the key index for a WEP key. 1877 * 1878 * Note that in a regular BSS, multicast frames are sent by the 1879 * AP only, associated stations unicast the frame to the AP first 1880 * which then multicasts it on their behalf. 1881 * 1882 * There is also a slight problem in IBSS mode: GTKs are negotiated 1883 * with each station, that is something we don't currently handle. 1884 * The spec seems to expect that one negotiates the same key with 1885 * every station but there's no such requirement; VLANs could be 1886 * possible. 1887 */ 1888 1889 /* start without a key */ 1890 rx->key = NULL; 1891 fc = hdr->frame_control; 1892 1893 if (rx->sta) { 1894 int keyid = rx->sta->ptk_idx; 1895 1896 if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) { 1897 cs = rx->sta->cipher_scheme; 1898 keyid = ieee80211_get_cs_keyid(cs, rx->skb); 1899 if (unlikely(keyid < 0)) 1900 return RX_DROP_UNUSABLE; 1901 } 1902 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 1903 } 1904 1905 if (!ieee80211_has_protected(fc)) 1906 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 1907 1908 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 1909 rx->key = sta_ptk; 1910 if ((status->flag & RX_FLAG_DECRYPTED) && 1911 (status->flag & RX_FLAG_IV_STRIPPED)) 1912 return RX_CONTINUE; 1913 /* Skip decryption if the frame is not protected. */ 1914 if (!ieee80211_has_protected(fc)) 1915 return RX_CONTINUE; 1916 } else if (mmie_keyidx >= 0) { 1917 /* Broadcast/multicast robust management frame / BIP */ 1918 if ((status->flag & RX_FLAG_DECRYPTED) && 1919 (status->flag & RX_FLAG_IV_STRIPPED)) 1920 return RX_CONTINUE; 1921 1922 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1923 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1924 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1925 if (rx->sta) { 1926 if (ieee80211_is_group_privacy_action(skb) && 1927 test_sta_flag(rx->sta, WLAN_STA_MFP)) 1928 return RX_DROP_MONITOR; 1929 1930 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 1931 } 1932 if (!rx->key) 1933 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 1934 } else if (!ieee80211_has_protected(fc)) { 1935 /* 1936 * The frame was not protected, so skip decryption. 
However, we 1937 * need to set rx->key if there is a key that could have been 1938 * used so that the frame may be dropped if encryption would 1939 * have been expected. 1940 */ 1941 struct ieee80211_key *key = NULL; 1942 struct ieee80211_sub_if_data *sdata = rx->sdata; 1943 int i; 1944 1945 if (ieee80211_is_mgmt(fc) && 1946 is_multicast_ether_addr(hdr->addr1) && 1947 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 1948 rx->key = key; 1949 else { 1950 if (rx->sta) { 1951 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1952 key = rcu_dereference(rx->sta->gtk[i]); 1953 if (key) 1954 break; 1955 } 1956 } 1957 if (!key) { 1958 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1959 key = rcu_dereference(sdata->keys[i]); 1960 if (key) 1961 break; 1962 } 1963 } 1964 if (key) 1965 rx->key = key; 1966 } 1967 return RX_CONTINUE; 1968 } else { 1969 u8 keyid; 1970 1971 /* 1972 * The device doesn't give us the IV so we won't be 1973 * able to look up the key. That's ok though, we 1974 * don't need to decrypt the frame, we just won't 1975 * be able to keep statistics accurate. 1976 * Except for key threshold notifications, should 1977 * we somehow allow the driver to tell us which key 1978 * the hardware used if this flag is set? 1979 */ 1980 if ((status->flag & RX_FLAG_DECRYPTED) && 1981 (status->flag & RX_FLAG_IV_STRIPPED)) 1982 return RX_CONTINUE; 1983 1984 hdrlen = ieee80211_hdrlen(fc); 1985 1986 if (cs) { 1987 keyidx = ieee80211_get_cs_keyid(cs, rx->skb); 1988 1989 if (unlikely(keyidx < 0)) 1990 return RX_DROP_UNUSABLE; 1991 } else { 1992 if (rx->skb->len < 8 + hdrlen) 1993 return RX_DROP_UNUSABLE; /* TODO: count this? */ 1994 /* 1995 * no need to call ieee80211_wep_get_keyidx, 1996 * it verifies a bunch of things we've done already 1997 */ 1998 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 1999 keyidx = keyid >> 6; 2000 } 2001 2002 /* check per-station GTK first, if multicast packet */ 2003 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 2004 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 2005 2006 /* if not found, try default key */ 2007 if (!rx->key) { 2008 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2009 2010 /* 2011 * RSNA-protected unicast frames should always be 2012 * sent with pairwise or station-to-station keys, 2013 * but for WEP we allow using a key index as well. 
2014 */ 2015 if (rx->key && 2016 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2017 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2018 !is_multicast_ether_addr(hdr->addr1)) 2019 rx->key = NULL; 2020 } 2021 } 2022 2023 if (rx->key) { 2024 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2025 return RX_DROP_MONITOR; 2026 2027 /* TODO: add threshold stuff again */ 2028 } else { 2029 return RX_DROP_MONITOR; 2030 } 2031 2032 switch (rx->key->conf.cipher) { 2033 case WLAN_CIPHER_SUITE_WEP40: 2034 case WLAN_CIPHER_SUITE_WEP104: 2035 result = ieee80211_crypto_wep_decrypt(rx); 2036 break; 2037 case WLAN_CIPHER_SUITE_TKIP: 2038 result = ieee80211_crypto_tkip_decrypt(rx); 2039 break; 2040 case WLAN_CIPHER_SUITE_CCMP: 2041 result = ieee80211_crypto_ccmp_decrypt( 2042 rx, IEEE80211_CCMP_MIC_LEN); 2043 break; 2044 case WLAN_CIPHER_SUITE_CCMP_256: 2045 result = ieee80211_crypto_ccmp_decrypt( 2046 rx, IEEE80211_CCMP_256_MIC_LEN); 2047 break; 2048 case WLAN_CIPHER_SUITE_AES_CMAC: 2049 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2050 break; 2051 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2052 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2053 break; 2054 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2055 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2056 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2057 break; 2058 case WLAN_CIPHER_SUITE_GCMP: 2059 case WLAN_CIPHER_SUITE_GCMP_256: 2060 result = ieee80211_crypto_gcmp_decrypt(rx); 2061 break; 2062 default: 2063 result = ieee80211_crypto_hw_decrypt(rx); 2064 } 2065 2066 /* the hdr variable is invalid after the decrypt handlers */ 2067 2068 /* either the frame has been decrypted or will be dropped */ 2069 status->flag |= RX_FLAG_DECRYPTED; 2070 2071 return result; 2072 } 2073 2074 static inline struct ieee80211_fragment_entry * 2075 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 2076 unsigned int frag, unsigned int seq, int rx_queue, 2077 struct sk_buff **skb) 2078 { 2079 struct ieee80211_fragment_entry *entry; 2080 2081 entry = &sdata->fragments[sdata->fragment_next++]; 2082 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 2083 sdata->fragment_next = 0; 2084 2085 if (!skb_queue_empty(&entry->skb_list)) 2086 __skb_queue_purge(&entry->skb_list); 2087 2088 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2089 *skb = NULL; 2090 entry->first_frag_time = jiffies; 2091 entry->seq = seq; 2092 entry->rx_queue = rx_queue; 2093 entry->last_frag = frag; 2094 entry->check_sequential_pn = false; 2095 entry->extra_len = 0; 2096 2097 return entry; 2098 } 2099 2100 static inline struct ieee80211_fragment_entry * 2101 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 2102 unsigned int frag, unsigned int seq, 2103 int rx_queue, struct ieee80211_hdr *hdr) 2104 { 2105 struct ieee80211_fragment_entry *entry; 2106 int i, idx; 2107 2108 idx = sdata->fragment_next; 2109 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2110 struct ieee80211_hdr *f_hdr; 2111 struct sk_buff *f_skb; 2112 2113 idx--; 2114 if (idx < 0) 2115 idx = IEEE80211_FRAGMENT_MAX - 1; 2116 2117 entry = &sdata->fragments[idx]; 2118 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2119 entry->rx_queue != rx_queue || 2120 entry->last_frag + 1 != frag) 2121 continue; 2122 2123 f_skb = __skb_peek(&entry->skb_list); 2124 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2125 2126 /* 2127 * Check ftype and addresses are equal, else check next fragment 2128 */ 2129 if (((hdr->frame_control ^ f_hdr->frame_control) & 2130 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2131 
!ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2132 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2133 continue; 2134 2135 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2136 __skb_queue_purge(&entry->skb_list); 2137 continue; 2138 } 2139 return entry; 2140 } 2141 2142 return NULL; 2143 } 2144 2145 static ieee80211_rx_result debug_noinline 2146 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2147 { 2148 struct ieee80211_hdr *hdr; 2149 u16 sc; 2150 __le16 fc; 2151 unsigned int frag, seq; 2152 struct ieee80211_fragment_entry *entry; 2153 struct sk_buff *skb; 2154 2155 hdr = (struct ieee80211_hdr *)rx->skb->data; 2156 fc = hdr->frame_control; 2157 2158 if (ieee80211_is_ctl(fc)) 2159 return RX_CONTINUE; 2160 2161 sc = le16_to_cpu(hdr->seq_ctrl); 2162 frag = sc & IEEE80211_SCTL_FRAG; 2163 2164 if (is_multicast_ether_addr(hdr->addr1)) { 2165 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); 2166 goto out_no_led; 2167 } 2168 2169 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2170 goto out; 2171 2172 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2173 2174 if (skb_linearize(rx->skb)) 2175 return RX_DROP_UNUSABLE; 2176 2177 /* 2178 * skb_linearize() might change the skb->data and 2179 * previously cached variables (in this case, hdr) need to 2180 * be refreshed with the new data. 2181 */ 2182 hdr = (struct ieee80211_hdr *)rx->skb->data; 2183 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2184 2185 if (frag == 0) { 2186 /* This is the first fragment of a new frame. */ 2187 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 2188 rx->seqno_idx, &(rx->skb)); 2189 if (rx->key && 2190 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2191 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2192 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2193 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2194 ieee80211_has_protected(fc)) { 2195 int queue = rx->security_idx; 2196 2197 /* Store CCMP/GCMP PN so that we can verify that the 2198 * next fragment has a sequential PN value. 2199 */ 2200 entry->check_sequential_pn = true; 2201 memcpy(entry->last_pn, 2202 rx->key->u.ccmp.rx_pn[queue], 2203 IEEE80211_CCMP_PN_LEN); 2204 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2205 u.ccmp.rx_pn) != 2206 offsetof(struct ieee80211_key, 2207 u.gcmp.rx_pn)); 2208 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2209 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2210 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2211 IEEE80211_GCMP_PN_LEN); 2212 } 2213 return RX_QUEUED; 2214 } 2215 2216 /* This is a fragment for a frame that should already be pending in 2217 * fragment cache. Add this fragment to the end of the pending entry. 2218 */ 2219 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 2220 rx->seqno_idx, hdr); 2221 if (!entry) { 2222 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2223 return RX_DROP_MONITOR; 2224 } 2225 2226 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2227 * MPDU PN values are not incrementing in steps of 1." 
2228 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2229 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2230 */ 2231 if (entry->check_sequential_pn) { 2232 int i; 2233 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2234 int queue; 2235 2236 if (!rx->key || 2237 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 2238 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && 2239 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && 2240 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) 2241 return RX_DROP_UNUSABLE; 2242 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2243 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2244 pn[i]++; 2245 if (pn[i]) 2246 break; 2247 } 2248 queue = rx->security_idx; 2249 rpn = rx->key->u.ccmp.rx_pn[queue]; 2250 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2251 return RX_DROP_UNUSABLE; 2252 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2253 } 2254 2255 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2256 __skb_queue_tail(&entry->skb_list, rx->skb); 2257 entry->last_frag = frag; 2258 entry->extra_len += rx->skb->len; 2259 if (ieee80211_has_morefrags(fc)) { 2260 rx->skb = NULL; 2261 return RX_QUEUED; 2262 } 2263 2264 rx->skb = __skb_dequeue(&entry->skb_list); 2265 if (skb_tailroom(rx->skb) < entry->extra_len) { 2266 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2267 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2268 GFP_ATOMIC))) { 2269 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2270 __skb_queue_purge(&entry->skb_list); 2271 return RX_DROP_UNUSABLE; 2272 } 2273 } 2274 while ((skb = __skb_dequeue(&entry->skb_list))) { 2275 skb_put_data(rx->skb, skb->data, skb->len); 2276 dev_kfree_skb(skb); 2277 } 2278 2279 out: 2280 ieee80211_led_rx(rx->local); 2281 out_no_led: 2282 if (rx->sta) 2283 rx->sta->rx_stats.packets++; 2284 return RX_CONTINUE; 2285 } 2286 2287 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2288 { 2289 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2290 return -EACCES; 2291 2292 return 0; 2293 } 2294 2295 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2296 { 2297 struct sk_buff *skb = rx->skb; 2298 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2299 2300 /* 2301 * Pass through unencrypted frames if the hardware has 2302 * decrypted them already. 2303 */ 2304 if (status->flag & RX_FLAG_DECRYPTED) 2305 return 0; 2306 2307 /* Drop unencrypted frames if key is set. */ 2308 if (unlikely(!ieee80211_has_protected(fc) && 2309 !ieee80211_is_nullfunc(fc) && 2310 ieee80211_is_data(fc) && rx->key)) 2311 return -EACCES; 2312 2313 return 0; 2314 } 2315 2316 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2317 { 2318 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2319 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2320 __le16 fc = hdr->frame_control; 2321 2322 /* 2323 * Pass through unencrypted frames if the hardware has 2324 * decrypted them already. 
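 * The decrypt handler sets RX_FLAG_DECRYPTED on every frame it accepts,
 * so by this point the flag covers software-decrypted frames as well as
 * hardware-decrypted ones.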
2325 */ 2326 if (status->flag & RX_FLAG_DECRYPTED) 2327 return 0; 2328 2329 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2330 if (unlikely(!ieee80211_has_protected(fc) && 2331 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 2332 rx->key)) { 2333 if (ieee80211_is_deauth(fc) || 2334 ieee80211_is_disassoc(fc)) 2335 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2336 rx->skb->data, 2337 rx->skb->len); 2338 return -EACCES; 2339 } 2340 /* BIP does not use Protected field, so need to check MMIE */ 2341 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2342 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2343 if (ieee80211_is_deauth(fc) || 2344 ieee80211_is_disassoc(fc)) 2345 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2346 rx->skb->data, 2347 rx->skb->len); 2348 return -EACCES; 2349 } 2350 /* 2351 * When using MFP, Action frames are not allowed prior to 2352 * having configured keys. 2353 */ 2354 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2355 ieee80211_is_robust_mgmt_frame(rx->skb))) 2356 return -EACCES; 2357 } 2358 2359 return 0; 2360 } 2361 2362 static int 2363 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2364 { 2365 struct ieee80211_sub_if_data *sdata = rx->sdata; 2366 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2367 bool check_port_control = false; 2368 struct ethhdr *ehdr; 2369 int ret; 2370 2371 *port_control = false; 2372 if (ieee80211_has_a4(hdr->frame_control) && 2373 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2374 return -1; 2375 2376 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2377 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2378 2379 if (!sdata->u.mgd.use_4addr) 2380 return -1; 2381 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2382 check_port_control = true; 2383 } 2384 2385 if (is_multicast_ether_addr(hdr->addr1) && 2386 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2387 return -1; 2388 2389 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2390 if (ret < 0) 2391 return ret; 2392 2393 ehdr = (struct ethhdr *) rx->skb->data; 2394 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2395 *port_control = true; 2396 else if (check_port_control) 2397 return -1; 2398 2399 return 0; 2400 } 2401 2402 /* 2403 * requires that rx->skb is a frame with ethernet header 2404 */ 2405 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2406 { 2407 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2408 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2409 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2410 2411 /* 2412 * Allow EAPOL frames to us/the PAE group address regardless 2413 * of whether the frame was encrypted or not. 
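 * The PAE group address (01:80:C2:00:00:03) is the link-local multicast
 * address IEEE 802.1X reserves for port access entities; EAPOL frames
 * have to get through even before any keys are installed, otherwise the
 * handshake could never complete.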
2414 */ 2415 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2416 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2417 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2418 return true; 2419 2420 if (ieee80211_802_1x_port_control(rx) || 2421 ieee80211_drop_unencrypted(rx, fc)) 2422 return false; 2423 2424 return true; 2425 } 2426 2427 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2428 struct ieee80211_rx_data *rx) 2429 { 2430 struct ieee80211_sub_if_data *sdata = rx->sdata; 2431 struct net_device *dev = sdata->dev; 2432 2433 if (unlikely((skb->protocol == sdata->control_port_protocol || 2434 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && 2435 sdata->control_port_over_nl80211)) { 2436 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2437 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2438 2439 cfg80211_rx_control_port(dev, skb, noencrypt); 2440 dev_kfree_skb(skb); 2441 } else { 2442 /* deliver to local stack */ 2443 if (rx->napi) 2444 napi_gro_receive(rx->napi, skb); 2445 else 2446 netif_receive_skb(skb); 2447 } 2448 } 2449 2450 /* 2451 * requires that rx->skb is a frame with ethernet header 2452 */ 2453 static void 2454 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2455 { 2456 struct ieee80211_sub_if_data *sdata = rx->sdata; 2457 struct net_device *dev = sdata->dev; 2458 struct sk_buff *skb, *xmit_skb; 2459 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2460 struct sta_info *dsta; 2461 2462 skb = rx->skb; 2463 xmit_skb = NULL; 2464 2465 ieee80211_rx_stats(dev, skb->len); 2466 2467 if (rx->sta) { 2468 /* The seqno index has the same property as needed 2469 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2470 * for non-QoS-data frames. Here we know it's a data 2471 * frame, so count MSDUs. 2472 */ 2473 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 2474 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2475 u64_stats_update_end(&rx->sta->rx_stats.syncp); 2476 } 2477 2478 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2479 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2480 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2481 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2482 if (is_multicast_ether_addr(ehdr->h_dest) && 2483 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2484 /* 2485 * send multicast frames both to higher layers in 2486 * local net stack and back to the wireless medium 2487 */ 2488 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2489 if (!xmit_skb) 2490 net_info_ratelimited("%s: failed to clone multicast frame\n", 2491 dev->name); 2492 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2493 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2494 dsta = sta_info_get(sdata, ehdr->h_dest); 2495 if (dsta) { 2496 /* 2497 * The destination station is associated to 2498 * this AP (in this VLAN), so send the frame 2499 * directly to it and do not pass it to local 2500 * net stack. 2501 */ 2502 xmit_skb = skb; 2503 skb = NULL; 2504 } 2505 } 2506 } 2507 2508 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2509 if (skb) { 2510 /* 'align' will only take the values 0 or 2 here since all 2511 * frames are required to be aligned to 2-byte boundaries 2512 * when being passed to mac80211; the code here works just 2513 * as well if that isn't true, but mac80211 assumes it can 2514 * access fields as 2-byte aligned (e.g. 
for ether_addr_equal) 2515 */ 2516 int align; 2517 2518 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2519 if (align) { 2520 if (WARN_ON(skb_headroom(skb) < 3)) { 2521 dev_kfree_skb(skb); 2522 skb = NULL; 2523 } else { 2524 u8 *data = skb->data; 2525 size_t len = skb_headlen(skb); 2526 skb->data -= align; 2527 memmove(skb->data, data, len); 2528 skb_set_tail_pointer(skb, len); 2529 } 2530 } 2531 } 2532 #endif 2533 2534 if (skb) { 2535 skb->protocol = eth_type_trans(skb, dev); 2536 memset(skb->cb, 0, sizeof(skb->cb)); 2537 2538 ieee80211_deliver_skb_to_local_stack(skb, rx); 2539 } 2540 2541 if (xmit_skb) { 2542 /* 2543 * Send to wireless media and increase priority by 256 to 2544 * keep the received priority instead of reclassifying 2545 * the frame (see cfg80211_classify8021d). 2546 */ 2547 xmit_skb->priority += 256; 2548 xmit_skb->protocol = htons(ETH_P_802_3); 2549 skb_reset_network_header(xmit_skb); 2550 skb_reset_mac_header(xmit_skb); 2551 dev_queue_xmit(xmit_skb); 2552 } 2553 } 2554 2555 static ieee80211_rx_result debug_noinline 2556 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) 2557 { 2558 struct net_device *dev = rx->sdata->dev; 2559 struct sk_buff *skb = rx->skb; 2560 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2561 __le16 fc = hdr->frame_control; 2562 struct sk_buff_head frame_list; 2563 struct ethhdr ethhdr; 2564 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; 2565 2566 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 2567 check_da = NULL; 2568 check_sa = NULL; 2569 } else switch (rx->sdata->vif.type) { 2570 case NL80211_IFTYPE_AP: 2571 case NL80211_IFTYPE_AP_VLAN: 2572 check_da = NULL; 2573 break; 2574 case NL80211_IFTYPE_STATION: 2575 if (!rx->sta || 2576 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) 2577 check_sa = NULL; 2578 break; 2579 case NL80211_IFTYPE_MESH_POINT: 2580 check_sa = NULL; 2581 break; 2582 default: 2583 break; 2584 } 2585 2586 skb->dev = dev; 2587 __skb_queue_head_init(&frame_list); 2588 2589 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr, 2590 rx->sdata->vif.addr, 2591 rx->sdata->vif.type, 2592 data_offset)) 2593 return RX_DROP_UNUSABLE; 2594 2595 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2596 rx->sdata->vif.type, 2597 rx->local->hw.extra_tx_headroom, 2598 check_da, check_sa); 2599 2600 while (!skb_queue_empty(&frame_list)) { 2601 rx->skb = __skb_dequeue(&frame_list); 2602 2603 if (!ieee80211_frame_allowed(rx, fc)) { 2604 dev_kfree_skb(rx->skb); 2605 continue; 2606 } 2607 2608 ieee80211_deliver_skb(rx); 2609 } 2610 2611 return RX_QUEUED; 2612 } 2613 2614 static ieee80211_rx_result debug_noinline 2615 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2616 { 2617 struct sk_buff *skb = rx->skb; 2618 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2619 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2620 __le16 fc = hdr->frame_control; 2621 2622 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2623 return RX_CONTINUE; 2624 2625 if (unlikely(!ieee80211_is_data(fc))) 2626 return RX_CONTINUE; 2627 2628 if (unlikely(!ieee80211_is_data_present(fc))) 2629 return RX_DROP_MONITOR; 2630 2631 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 2632 switch (rx->sdata->vif.type) { 2633 case NL80211_IFTYPE_AP_VLAN: 2634 if (!rx->sdata->u.vlan.sta) 2635 return RX_DROP_UNUSABLE; 2636 break; 2637 case NL80211_IFTYPE_STATION: 2638 if (!rx->sdata->u.mgd.use_4addr) 2639 return RX_DROP_UNUSABLE; 2640 break; 2641 default: 2642 return RX_DROP_UNUSABLE; 2643 }
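		/* Only 4-address A-MSDUs on a configured 4-addr link (an
		 * AP_VLAN with a station, or a managed interface in 4-addr
		 * mode) make it past the checks above.
		 */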
2644 } 2645 2646 if (is_multicast_ether_addr(hdr->addr1)) 2647 return RX_DROP_UNUSABLE; 2648 2649 return __ieee80211_rx_h_amsdu(rx, 0); 2650 } 2651 2652 #ifdef CONFIG_MAC80211_MESH 2653 static ieee80211_rx_result 2654 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2655 { 2656 struct ieee80211_hdr *fwd_hdr, *hdr; 2657 struct ieee80211_tx_info *info; 2658 struct ieee80211s_hdr *mesh_hdr; 2659 struct sk_buff *skb = rx->skb, *fwd_skb; 2660 struct ieee80211_local *local = rx->local; 2661 struct ieee80211_sub_if_data *sdata = rx->sdata; 2662 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2663 u16 ac, q, hdrlen; 2664 int tailroom = 0; 2665 2666 hdr = (struct ieee80211_hdr *) skb->data; 2667 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2668 2669 /* make sure fixed part of mesh header is there, also checks skb len */ 2670 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2671 return RX_DROP_MONITOR; 2672 2673 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2674 2675 /* make sure full mesh header is there, also checks skb len */ 2676 if (!pskb_may_pull(rx->skb, 2677 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2678 return RX_DROP_MONITOR; 2679 2680 /* reload pointers */ 2681 hdr = (struct ieee80211_hdr *) skb->data; 2682 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2683 2684 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2685 return RX_DROP_MONITOR; 2686 2687 /* frame is in RMC, don't forward */ 2688 if (ieee80211_is_data(hdr->frame_control) && 2689 is_multicast_ether_addr(hdr->addr1) && 2690 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2691 return RX_DROP_MONITOR; 2692 2693 if (!ieee80211_is_data(hdr->frame_control)) 2694 return RX_CONTINUE; 2695 2696 if (!mesh_hdr->ttl) 2697 return RX_DROP_MONITOR; 2698 2699 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2700 struct mesh_path *mppath; 2701 char *proxied_addr; 2702 char *mpp_addr; 2703 2704 if (is_multicast_ether_addr(hdr->addr1)) { 2705 mpp_addr = hdr->addr3; 2706 proxied_addr = mesh_hdr->eaddr1; 2707 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == 2708 MESH_FLAGS_AE_A5_A6) { 2709 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2710 mpp_addr = hdr->addr4; 2711 proxied_addr = mesh_hdr->eaddr2; 2712 } else { 2713 return RX_DROP_MONITOR; 2714 } 2715 2716 rcu_read_lock(); 2717 mppath = mpp_path_lookup(sdata, proxied_addr); 2718 if (!mppath) { 2719 mpp_path_add(sdata, proxied_addr, mpp_addr); 2720 } else { 2721 spin_lock_bh(&mppath->state_lock); 2722 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2723 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2724 mppath->exp_time = jiffies; 2725 spin_unlock_bh(&mppath->state_lock); 2726 } 2727 rcu_read_unlock(); 2728 } 2729 2730 /* Frame has reached destination. 
Don't forward */ 2731 if (!is_multicast_ether_addr(hdr->addr1) && 2732 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2733 return RX_CONTINUE; 2734 2735 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2736 q = sdata->vif.hw_queue[ac]; 2737 if (ieee80211_queue_stopped(&local->hw, q)) { 2738 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2739 return RX_DROP_MONITOR; 2740 } 2741 skb_set_queue_mapping(skb, q); 2742 2743 if (!--mesh_hdr->ttl) { 2744 if (!is_multicast_ether_addr(hdr->addr1)) 2745 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, 2746 dropped_frames_ttl); 2747 goto out; 2748 } 2749 2750 if (!ifmsh->mshcfg.dot11MeshForwarding) 2751 goto out; 2752 2753 if (sdata->crypto_tx_tailroom_needed_cnt) 2754 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2755 2756 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2757 sdata->encrypt_headroom, 2758 tailroom, GFP_ATOMIC); 2759 if (!fwd_skb) 2760 goto out; 2761 2762 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2763 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2764 info = IEEE80211_SKB_CB(fwd_skb); 2765 memset(info, 0, sizeof(*info)); 2766 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2767 info->control.vif = &rx->sdata->vif; 2768 info->control.jiffies = jiffies; 2769 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2770 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2771 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2772 /* update power mode indication when forwarding */ 2773 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2774 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2775 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2776 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2777 } else { 2778 /* unable to resolve next hop */ 2779 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2780 fwd_hdr->addr3, 0, 2781 WLAN_REASON_MESH_PATH_NOFORWARD, 2782 fwd_hdr->addr2); 2783 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2784 kfree_skb(fwd_skb); 2785 return RX_DROP_MONITOR; 2786 } 2787 2788 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2789 ieee80211_add_pending_skb(local, fwd_skb); 2790 out: 2791 if (is_multicast_ether_addr(hdr->addr1)) 2792 return RX_CONTINUE; 2793 return RX_DROP_MONITOR; 2794 } 2795 #endif 2796 2797 static ieee80211_rx_result debug_noinline 2798 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2799 { 2800 struct ieee80211_sub_if_data *sdata = rx->sdata; 2801 struct ieee80211_local *local = rx->local; 2802 struct net_device *dev = sdata->dev; 2803 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2804 __le16 fc = hdr->frame_control; 2805 bool port_control; 2806 int err; 2807 2808 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2809 return RX_CONTINUE; 2810 2811 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2812 return RX_DROP_MONITOR; 2813 2814 /* 2815 * Send unexpected-4addr-frame event to hostapd. For older versions, 2816 * also drop the frame to cooked monitor interfaces. 
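	 * (RX_DROP_MONITOR still hands the frame to any cooked monitor
	 * interfaces before it is discarded, while RX_DROP_UNUSABLE frees it
	 * immediately; see ieee80211_rx_handlers_result().)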
2817 */ 2818 if (ieee80211_has_a4(hdr->frame_control) && 2819 sdata->vif.type == NL80211_IFTYPE_AP) { 2820 if (rx->sta && 2821 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2822 cfg80211_rx_unexpected_4addr_frame( 2823 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2824 return RX_DROP_MONITOR; 2825 } 2826 2827 err = __ieee80211_data_to_8023(rx, &port_control); 2828 if (unlikely(err)) 2829 return RX_DROP_UNUSABLE; 2830 2831 if (!ieee80211_frame_allowed(rx, fc)) 2832 return RX_DROP_MONITOR; 2833 2834 /* directly handle TDLS channel switch requests/responses */ 2835 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2836 cpu_to_be16(ETH_P_TDLS))) { 2837 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2838 2839 if (pskb_may_pull(rx->skb, 2840 offsetof(struct ieee80211_tdls_data, u)) && 2841 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2842 tf->category == WLAN_CATEGORY_TDLS && 2843 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2844 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2845 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 2846 schedule_work(&local->tdls_chsw_work); 2847 if (rx->sta) 2848 rx->sta->rx_stats.packets++; 2849 2850 return RX_QUEUED; 2851 } 2852 } 2853 2854 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2855 unlikely(port_control) && sdata->bss) { 2856 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2857 u.ap); 2858 dev = sdata->dev; 2859 rx->sdata = sdata; 2860 } 2861 2862 rx->skb->dev = dev; 2863 2864 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 2865 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2866 !is_multicast_ether_addr( 2867 ((struct ethhdr *)rx->skb->data)->h_dest) && 2868 (!local->scanning && 2869 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 2870 mod_timer(&local->dynamic_ps_timer, jiffies + 2871 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2872 2873 ieee80211_deliver_skb(rx); 2874 2875 return RX_QUEUED; 2876 } 2877 2878 static ieee80211_rx_result debug_noinline 2879 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2880 { 2881 struct sk_buff *skb = rx->skb; 2882 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2883 struct tid_ampdu_rx *tid_agg_rx; 2884 u16 start_seq_num; 2885 u16 tid; 2886 2887 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2888 return RX_CONTINUE; 2889 2890 if (ieee80211_is_back_req(bar->frame_control)) { 2891 struct { 2892 __le16 control, start_seq_num; 2893 } __packed bar_data; 2894 struct ieee80211_event event = { 2895 .type = BAR_RX_EVENT, 2896 }; 2897 2898 if (!rx->sta) 2899 return RX_DROP_MONITOR; 2900 2901 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2902 &bar_data, sizeof(bar_data))) 2903 return RX_DROP_MONITOR; 2904 2905 tid = le16_to_cpu(bar_data.control) >> 12; 2906 2907 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 2908 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 2909 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 2910 WLAN_BACK_RECIPIENT, 2911 WLAN_REASON_QSTA_REQUIRE_SETUP); 2912 2913 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2914 if (!tid_agg_rx) 2915 return RX_DROP_MONITOR; 2916 2917 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2918 event.u.ba.tid = tid; 2919 event.u.ba.ssn = start_seq_num; 2920 event.u.ba.sta = &rx->sta->sta; 2921 2922 /* reset session timer */ 2923 if (tid_agg_rx->timeout) 2924 mod_timer(&tid_agg_rx->session_timer, 2925 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2926 
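		/* The reorder buffer is only manipulated under reorder_lock,
		 * so this flush cannot race with releases from the reorder
		 * timeout path or from normal RX.
		 */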
2927 spin_lock(&tid_agg_rx->reorder_lock); 2928 /* release stored frames up to start of BAR */ 2929 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2930 start_seq_num, frames); 2931 spin_unlock(&tid_agg_rx->reorder_lock); 2932 2933 drv_event_callback(rx->local, rx->sdata, &event); 2934 2935 kfree_skb(skb); 2936 return RX_QUEUED; 2937 } 2938 2939 /* 2940 * After this point, we only want management frames, 2941 * so we can drop all remaining control frames to 2942 * cooked monitor interfaces. 2943 */ 2944 return RX_DROP_MONITOR; 2945 } 2946 2947 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2948 struct ieee80211_mgmt *mgmt, 2949 size_t len) 2950 { 2951 struct ieee80211_local *local = sdata->local; 2952 struct sk_buff *skb; 2953 struct ieee80211_mgmt *resp; 2954 2955 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2956 /* Not to own unicast address */ 2957 return; 2958 } 2959 2960 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2961 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2962 /* Not from the current AP or not associated yet. */ 2963 return; 2964 } 2965 2966 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2967 /* Too short SA Query request frame */ 2968 return; 2969 } 2970 2971 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2972 if (skb == NULL) 2973 return; 2974 2975 skb_reserve(skb, local->hw.extra_tx_headroom); 2976 resp = skb_put_zero(skb, 24); 2977 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2978 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2979 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2980 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2981 IEEE80211_STYPE_ACTION); 2982 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2983 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2984 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2985 memcpy(resp->u.action.u.sa_query.trans_id, 2986 mgmt->u.action.u.sa_query.trans_id, 2987 WLAN_SA_QUERY_TR_ID_LEN); 2988 2989 ieee80211_tx_skb(sdata, skb); 2990 } 2991 2992 static ieee80211_rx_result debug_noinline 2993 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2994 { 2995 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2996 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2997 2998 /* 2999 * From here on, look only at management frames. 3000 * Data and control frames are already handled, 3001 * and unknown (reserved) frames are useless. 
3002 */ 3003 if (rx->skb->len < 24) 3004 return RX_DROP_MONITOR; 3005 3006 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3007 return RX_DROP_MONITOR; 3008 3009 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3010 ieee80211_is_beacon(mgmt->frame_control) && 3011 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3012 int sig = 0; 3013 3014 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3015 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3016 sig = status->signal; 3017 3018 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 3019 rx->skb->data, rx->skb->len, 3020 status->freq, sig); 3021 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3022 } 3023 3024 if (ieee80211_drop_unencrypted_mgmt(rx)) 3025 return RX_DROP_UNUSABLE; 3026 3027 return RX_CONTINUE; 3028 } 3029 3030 static ieee80211_rx_result debug_noinline 3031 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3032 { 3033 struct ieee80211_local *local = rx->local; 3034 struct ieee80211_sub_if_data *sdata = rx->sdata; 3035 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3036 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3037 int len = rx->skb->len; 3038 3039 if (!ieee80211_is_action(mgmt->frame_control)) 3040 return RX_CONTINUE; 3041 3042 /* drop too small frames */ 3043 if (len < IEEE80211_MIN_ACTION_SIZE) 3044 return RX_DROP_UNUSABLE; 3045 3046 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3047 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3048 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3049 return RX_DROP_UNUSABLE; 3050 3051 switch (mgmt->u.action.category) { 3052 case WLAN_CATEGORY_HT: 3053 /* reject HT action frames from stations not supporting HT */ 3054 if (!rx->sta->sta.ht_cap.ht_supported) 3055 goto invalid; 3056 3057 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3058 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3059 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3060 sdata->vif.type != NL80211_IFTYPE_AP && 3061 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3062 break; 3063 3064 /* verify action & smps_control/chanwidth are present */ 3065 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3066 goto invalid; 3067 3068 switch (mgmt->u.action.u.ht_smps.action) { 3069 case WLAN_HT_ACTION_SMPS: { 3070 struct ieee80211_supported_band *sband; 3071 enum ieee80211_smps_mode smps_mode; 3072 struct sta_opmode_info sta_opmode = {}; 3073 3074 /* convert to HT capability */ 3075 switch (mgmt->u.action.u.ht_smps.smps_control) { 3076 case WLAN_HT_SMPS_CONTROL_DISABLED: 3077 smps_mode = IEEE80211_SMPS_OFF; 3078 break; 3079 case WLAN_HT_SMPS_CONTROL_STATIC: 3080 smps_mode = IEEE80211_SMPS_STATIC; 3081 break; 3082 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3083 smps_mode = IEEE80211_SMPS_DYNAMIC; 3084 break; 3085 default: 3086 goto invalid; 3087 } 3088 3089 /* if no change do nothing */ 3090 if (rx->sta->sta.smps_mode == smps_mode) 3091 goto handled; 3092 rx->sta->sta.smps_mode = smps_mode; 3093 sta_opmode.smps_mode = 3094 ieee80211_smps_mode_to_smps_mode(smps_mode); 3095 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3096 3097 sband = rx->local->hw.wiphy->bands[status->band]; 3098 3099 rate_control_rate_update(local, sband, rx->sta, 3100 IEEE80211_RC_SMPS_CHANGED); 3101 cfg80211_sta_opmode_change_notify(sdata->dev, 3102 rx->sta->addr, 3103 &sta_opmode, 3104 GFP_ATOMIC); 3105 goto handled; 3106 } 3107 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3108 struct ieee80211_supported_band *sband; 3109 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3110 enum ieee80211_sta_rx_bandwidth 
max_bw, new_bw; 3111 struct sta_opmode_info sta_opmode = {}; 3112 3113 /* If it doesn't support 40 MHz it can't change ... */ 3114 if (!(rx->sta->sta.ht_cap.cap & 3115 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3116 goto handled; 3117 3118 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 3119 max_bw = IEEE80211_STA_RX_BW_20; 3120 else 3121 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 3122 3123 /* set cur_max_bandwidth and recalc sta bw */ 3124 rx->sta->cur_max_bandwidth = max_bw; 3125 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 3126 3127 if (rx->sta->sta.bandwidth == new_bw) 3128 goto handled; 3129 3130 rx->sta->sta.bandwidth = new_bw; 3131 sband = rx->local->hw.wiphy->bands[status->band]; 3132 sta_opmode.bw = 3133 ieee80211_sta_rx_bw_to_chan_width(rx->sta); 3134 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; 3135 3136 rate_control_rate_update(local, sband, rx->sta, 3137 IEEE80211_RC_BW_CHANGED); 3138 cfg80211_sta_opmode_change_notify(sdata->dev, 3139 rx->sta->addr, 3140 &sta_opmode, 3141 GFP_ATOMIC); 3142 goto handled; 3143 } 3144 default: 3145 goto invalid; 3146 } 3147 3148 break; 3149 case WLAN_CATEGORY_PUBLIC: 3150 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3151 goto invalid; 3152 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3153 break; 3154 if (!rx->sta) 3155 break; 3156 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 3157 break; 3158 if (mgmt->u.action.u.ext_chan_switch.action_code != 3159 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3160 break; 3161 if (len < offsetof(struct ieee80211_mgmt, 3162 u.action.u.ext_chan_switch.variable)) 3163 goto invalid; 3164 goto queue; 3165 case WLAN_CATEGORY_VHT: 3166 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3167 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3168 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3169 sdata->vif.type != NL80211_IFTYPE_AP && 3170 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3171 break; 3172 3173 /* verify action code is present */ 3174 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3175 goto invalid; 3176 3177 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3178 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3179 /* verify opmode is present */ 3180 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3181 goto invalid; 3182 goto queue; 3183 } 3184 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3185 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3186 goto invalid; 3187 goto queue; 3188 } 3189 default: 3190 break; 3191 } 3192 break; 3193 case WLAN_CATEGORY_BACK: 3194 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3195 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3196 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3197 sdata->vif.type != NL80211_IFTYPE_AP && 3198 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3199 break; 3200 3201 /* verify action_code is present */ 3202 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3203 break; 3204 3205 switch (mgmt->u.action.u.addba_req.action_code) { 3206 case WLAN_ACTION_ADDBA_REQ: 3207 if (len < (IEEE80211_MIN_ACTION_SIZE + 3208 sizeof(mgmt->u.action.u.addba_req))) 3209 goto invalid; 3210 break; 3211 case WLAN_ACTION_ADDBA_RESP: 3212 if (len < (IEEE80211_MIN_ACTION_SIZE + 3213 sizeof(mgmt->u.action.u.addba_resp))) 3214 goto invalid; 3215 break; 3216 case WLAN_ACTION_DELBA: 3217 if (len < (IEEE80211_MIN_ACTION_SIZE + 3218 sizeof(mgmt->u.action.u.delba))) 3219 goto invalid; 3220 break; 3221 default: 3222 goto invalid; 3223 } 3224 3225 goto queue; 3226 case WLAN_CATEGORY_SPECTRUM_MGMT: 3227 /* verify action_code is present */ 3228 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3229 break; 3230 3231 switch (mgmt->u.action.u.measurement.action_code) 
{ 3232 case WLAN_ACTION_SPCT_MSR_REQ: 3233 if (status->band != NL80211_BAND_5GHZ) 3234 break; 3235 3236 if (len < (IEEE80211_MIN_ACTION_SIZE + 3237 sizeof(mgmt->u.action.u.measurement))) 3238 break; 3239 3240 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3241 break; 3242 3243 ieee80211_process_measurement_req(sdata, mgmt, len); 3244 goto handled; 3245 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3246 u8 *bssid; 3247 if (len < (IEEE80211_MIN_ACTION_SIZE + 3248 sizeof(mgmt->u.action.u.chan_switch))) 3249 break; 3250 3251 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3252 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3253 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3254 break; 3255 3256 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3257 bssid = sdata->u.mgd.bssid; 3258 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3259 bssid = sdata->u.ibss.bssid; 3260 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3261 bssid = mgmt->sa; 3262 else 3263 break; 3264 3265 if (!ether_addr_equal(mgmt->bssid, bssid)) 3266 break; 3267 3268 goto queue; 3269 } 3270 } 3271 break; 3272 case WLAN_CATEGORY_SA_QUERY: 3273 if (len < (IEEE80211_MIN_ACTION_SIZE + 3274 sizeof(mgmt->u.action.u.sa_query))) 3275 break; 3276 3277 switch (mgmt->u.action.u.sa_query.action) { 3278 case WLAN_ACTION_SA_QUERY_REQUEST: 3279 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3280 break; 3281 ieee80211_process_sa_query_req(sdata, mgmt, len); 3282 goto handled; 3283 } 3284 break; 3285 case WLAN_CATEGORY_SELF_PROTECTED: 3286 if (len < (IEEE80211_MIN_ACTION_SIZE + 3287 sizeof(mgmt->u.action.u.self_prot.action_code))) 3288 break; 3289 3290 switch (mgmt->u.action.u.self_prot.action_code) { 3291 case WLAN_SP_MESH_PEERING_OPEN: 3292 case WLAN_SP_MESH_PEERING_CLOSE: 3293 case WLAN_SP_MESH_PEERING_CONFIRM: 3294 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3295 goto invalid; 3296 if (sdata->u.mesh.user_mpm) 3297 /* userspace handles this frame */ 3298 break; 3299 goto queue; 3300 case WLAN_SP_MGK_INFORM: 3301 case WLAN_SP_MGK_ACK: 3302 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3303 goto invalid; 3304 break; 3305 } 3306 break; 3307 case WLAN_CATEGORY_MESH_ACTION: 3308 if (len < (IEEE80211_MIN_ACTION_SIZE + 3309 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3310 break; 3311 3312 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3313 break; 3314 if (mesh_action_is_path_sel(mgmt) && 3315 !mesh_path_sel_is_hwmp(sdata)) 3316 break; 3317 goto queue; 3318 } 3319 3320 return RX_CONTINUE; 3321 3322 invalid: 3323 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3324 /* will return in the next handlers */ 3325 return RX_CONTINUE; 3326 3327 handled: 3328 if (rx->sta) 3329 rx->sta->rx_stats.packets++; 3330 dev_kfree_skb(rx->skb); 3331 return RX_QUEUED; 3332 3333 queue: 3334 skb_queue_tail(&sdata->skb_queue, rx->skb); 3335 ieee80211_queue_work(&local->hw, &sdata->work); 3336 if (rx->sta) 3337 rx->sta->rx_stats.packets++; 3338 return RX_QUEUED; 3339 } 3340 3341 static ieee80211_rx_result debug_noinline 3342 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3343 { 3344 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3345 int sig = 0; 3346 3347 /* skip known-bad action frames and return them in the next handler */ 3348 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3349 return RX_CONTINUE; 3350 3351 /* 3352 * Getting here means the kernel doesn't know how to handle 3353 * it, but maybe userspace does ... 
include returned frames 3354 * so userspace can register for those to know whether ones 3355 * it transmitted were processed or returned. 3356 */ 3357 3358 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3359 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3360 sig = status->signal; 3361 3362 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 3363 rx->skb->data, rx->skb->len, 0)) { 3364 if (rx->sta) 3365 rx->sta->rx_stats.packets++; 3366 dev_kfree_skb(rx->skb); 3367 return RX_QUEUED; 3368 } 3369 3370 return RX_CONTINUE; 3371 } 3372 3373 static ieee80211_rx_result debug_noinline 3374 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 3375 { 3376 struct ieee80211_local *local = rx->local; 3377 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3378 struct sk_buff *nskb; 3379 struct ieee80211_sub_if_data *sdata = rx->sdata; 3380 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3381 3382 if (!ieee80211_is_action(mgmt->frame_control)) 3383 return RX_CONTINUE; 3384 3385 /* 3386 * For AP mode, hostapd is responsible for handling any action 3387 * frames that we didn't handle, including returning unknown 3388 * ones. For all other modes we will return them to the sender, 3389 * setting the 0x80 bit in the action category, as required by 3390 * 802.11-2012 9.24.4. 3391 * Newer versions of hostapd shall also use the management frame 3392 * registration mechanisms, but older ones still use cooked 3393 * monitor interfaces so push all frames there. 3394 */ 3395 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 3396 (sdata->vif.type == NL80211_IFTYPE_AP || 3397 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 3398 return RX_DROP_MONITOR; 3399 3400 if (is_multicast_ether_addr(mgmt->da)) 3401 return RX_DROP_MONITOR; 3402 3403 /* do not return rejected action frames */ 3404 if (mgmt->u.action.category & 0x80) 3405 return RX_DROP_UNUSABLE; 3406 3407 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3408 GFP_ATOMIC); 3409 if (nskb) { 3410 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3411 3412 nmgmt->u.action.category |= 0x80; 3413 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3414 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3415 3416 memset(nskb->cb, 0, sizeof(nskb->cb)); 3417 3418 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3419 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3420 3421 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3422 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3423 IEEE80211_TX_CTL_NO_CCK_RATE; 3424 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3425 info->hw_queue = 3426 local->hw.offchannel_tx_hw_queue; 3427 } 3428 3429 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3430 status->band, 0); 3431 } 3432 dev_kfree_skb(rx->skb); 3433 return RX_QUEUED; 3434 } 3435 3436 static ieee80211_rx_result debug_noinline 3437 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3438 { 3439 struct ieee80211_sub_if_data *sdata = rx->sdata; 3440 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3441 __le16 stype; 3442 3443 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3444 3445 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3446 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3447 sdata->vif.type != NL80211_IFTYPE_OCB && 3448 sdata->vif.type != NL80211_IFTYPE_STATION) 3449 return RX_DROP_MONITOR; 3450 3451 switch (stype) { 3452 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3453 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3454 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3455 /* process for all: mesh, mlme, 
ibss */ 3456 break; 3457 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3458 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3459 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3460 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3461 if (is_multicast_ether_addr(mgmt->da) && 3462 !is_broadcast_ether_addr(mgmt->da)) 3463 return RX_DROP_MONITOR; 3464 3465 /* process only for station */ 3466 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3467 return RX_DROP_MONITOR; 3468 break; 3469 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3470 /* process only for ibss and mesh */ 3471 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3472 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3473 return RX_DROP_MONITOR; 3474 break; 3475 default: 3476 return RX_DROP_MONITOR; 3477 } 3478 3479 /* queue up frame and kick off work to process it */ 3480 skb_queue_tail(&sdata->skb_queue, rx->skb); 3481 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3482 if (rx->sta) 3483 rx->sta->rx_stats.packets++; 3484 3485 return RX_QUEUED; 3486 } 3487 3488 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3489 struct ieee80211_rate *rate) 3490 { 3491 struct ieee80211_sub_if_data *sdata; 3492 struct ieee80211_local *local = rx->local; 3493 struct sk_buff *skb = rx->skb, *skb2; 3494 struct net_device *prev_dev = NULL; 3495 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3496 int needed_headroom; 3497 3498 /* 3499 * If cooked monitor has been processed already, then 3500 * don't do it again. If not, set the flag. 3501 */ 3502 if (rx->flags & IEEE80211_RX_CMNTR) 3503 goto out_free_skb; 3504 rx->flags |= IEEE80211_RX_CMNTR; 3505 3506 /* If there are no cooked monitor interfaces, just free the SKB */ 3507 if (!local->cooked_mntrs) 3508 goto out_free_skb; 3509 3510 /* vendor data is long removed here */ 3511 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3512 /* room for the radiotap header based on driver features */ 3513 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3514 3515 if (skb_headroom(skb) < needed_headroom && 3516 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3517 goto out_free_skb; 3518 3519 /* prepend radiotap information */ 3520 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3521 false); 3522 3523 skb_reset_mac_header(skb); 3524 skb->ip_summed = CHECKSUM_UNNECESSARY; 3525 skb->pkt_type = PACKET_OTHERHOST; 3526 skb->protocol = htons(ETH_P_802_2); 3527 3528 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3529 if (!ieee80211_sdata_running(sdata)) 3530 continue; 3531 3532 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3533 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) 3534 continue; 3535 3536 if (prev_dev) { 3537 skb2 = skb_clone(skb, GFP_ATOMIC); 3538 if (skb2) { 3539 skb2->dev = prev_dev; 3540 netif_receive_skb(skb2); 3541 } 3542 } 3543 3544 prev_dev = sdata->dev; 3545 ieee80211_rx_stats(sdata->dev, skb->len); 3546 } 3547 3548 if (prev_dev) { 3549 skb->dev = prev_dev; 3550 netif_receive_skb(skb); 3551 return; 3552 } 3553 3554 out_free_skb: 3555 dev_kfree_skb(skb); 3556 } 3557 3558 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3559 ieee80211_rx_result res) 3560 { 3561 switch (res) { 3562 case RX_DROP_MONITOR: 3563 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3564 if (rx->sta) 3565 rx->sta->rx_stats.dropped++; 3566 /* fall through */ 3567 case RX_CONTINUE: { 3568 struct ieee80211_rate *rate = NULL; 3569 struct ieee80211_supported_band *sband; 3570 struct ieee80211_rx_status *status; 3571 3572 status = 
IEEE80211_SKB_RXCB((rx->skb)); 3573 3574 sband = rx->local->hw.wiphy->bands[status->band]; 3575 if (status->encoding == RX_ENC_LEGACY) 3576 rate = &sband->bitrates[status->rate_idx]; 3577 3578 ieee80211_rx_cooked_monitor(rx, rate); 3579 break; 3580 } 3581 case RX_DROP_UNUSABLE: 3582 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3583 if (rx->sta) 3584 rx->sta->rx_stats.dropped++; 3585 dev_kfree_skb(rx->skb); 3586 break; 3587 case RX_QUEUED: 3588 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3589 break; 3590 } 3591 } 3592 3593 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3594 struct sk_buff_head *frames) 3595 { 3596 ieee80211_rx_result res = RX_DROP_MONITOR; 3597 struct sk_buff *skb; 3598 3599 #define CALL_RXH(rxh) \ 3600 do { \ 3601 res = rxh(rx); \ 3602 if (res != RX_CONTINUE) \ 3603 goto rxh_next; \ 3604 } while (0) 3605 3606 /* Lock here to avoid hitting all of the data used in the RX 3607 * path (e.g. key data, station data, ...) concurrently when 3608 * a frame is released from the reorder buffer due to timeout 3609 * from the timer, potentially concurrently with RX from the 3610 * driver. 3611 */ 3612 spin_lock_bh(&rx->local->rx_path_lock); 3613 3614 while ((skb = __skb_dequeue(frames))) { 3615 /* 3616 * all the other fields are valid across frames 3617 * that belong to an aMPDU since they are on the 3618 * same TID from the same station 3619 */ 3620 rx->skb = skb; 3621 3622 CALL_RXH(ieee80211_rx_h_check_more_data); 3623 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 3624 CALL_RXH(ieee80211_rx_h_sta_process); 3625 CALL_RXH(ieee80211_rx_h_decrypt); 3626 CALL_RXH(ieee80211_rx_h_defragment); 3627 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 3628 /* must be after MMIC verify so header is counted in MPDU mic */ 3629 #ifdef CONFIG_MAC80211_MESH 3630 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3631 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3632 #endif 3633 CALL_RXH(ieee80211_rx_h_amsdu); 3634 CALL_RXH(ieee80211_rx_h_data); 3635 3636 /* special treatment -- needs the queue */ 3637 res = ieee80211_rx_h_ctrl(rx, frames); 3638 if (res != RX_CONTINUE) 3639 goto rxh_next; 3640 3641 CALL_RXH(ieee80211_rx_h_mgmt_check); 3642 CALL_RXH(ieee80211_rx_h_action); 3643 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 3644 CALL_RXH(ieee80211_rx_h_action_return); 3645 CALL_RXH(ieee80211_rx_h_mgmt); 3646 3647 rxh_next: 3648 ieee80211_rx_handlers_result(rx, res); 3649 3650 #undef CALL_RXH 3651 } 3652 3653 spin_unlock_bh(&rx->local->rx_path_lock); 3654 } 3655 3656 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3657 { 3658 struct sk_buff_head reorder_release; 3659 ieee80211_rx_result res = RX_DROP_MONITOR; 3660 3661 __skb_queue_head_init(&reorder_release); 3662 3663 #define CALL_RXH(rxh) \ 3664 do { \ 3665 res = rxh(rx); \ 3666 if (res != RX_CONTINUE) \ 3667 goto rxh_next; \ 3668 } while (0) 3669 3670 CALL_RXH(ieee80211_rx_h_check_dup); 3671 CALL_RXH(ieee80211_rx_h_check); 3672 3673 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3674 3675 ieee80211_rx_handlers(rx, &reorder_release); 3676 return; 3677 3678 rxh_next: 3679 ieee80211_rx_handlers_result(rx, res); 3680 3681 #undef CALL_RXH 3682 } 3683 3684 /* 3685 * This function makes calls into the RX path, therefore 3686 * it has to be invoked under RCU read lock. 
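 * It runs when the RX reorder timer fires, outside any driver/NAPI
 * context, which is why rx.napi below is left NULL.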
3687 */ 3688 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3689 { 3690 struct sk_buff_head frames; 3691 struct ieee80211_rx_data rx = { 3692 .sta = sta, 3693 .sdata = sta->sdata, 3694 .local = sta->local, 3695 /* This is OK -- must be QoS data frame */ 3696 .security_idx = tid, 3697 .seqno_idx = tid, 3698 .napi = NULL, /* must be NULL to not have races */ 3699 }; 3700 struct tid_ampdu_rx *tid_agg_rx; 3701 3702 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3703 if (!tid_agg_rx) 3704 return; 3705 3706 __skb_queue_head_init(&frames); 3707 3708 spin_lock(&tid_agg_rx->reorder_lock); 3709 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3710 spin_unlock(&tid_agg_rx->reorder_lock); 3711 3712 if (!skb_queue_empty(&frames)) { 3713 struct ieee80211_event event = { 3714 .type = BA_FRAME_TIMEOUT, 3715 .u.ba.tid = tid, 3716 .u.ba.sta = &sta->sta, 3717 }; 3718 drv_event_callback(rx.local, rx.sdata, &event); 3719 } 3720 3721 ieee80211_rx_handlers(&rx, &frames); 3722 } 3723 3724 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 3725 u16 ssn, u64 filtered, 3726 u16 received_mpdus) 3727 { 3728 struct sta_info *sta; 3729 struct tid_ampdu_rx *tid_agg_rx; 3730 struct sk_buff_head frames; 3731 struct ieee80211_rx_data rx = { 3732 /* This is OK -- must be QoS data frame */ 3733 .security_idx = tid, 3734 .seqno_idx = tid, 3735 }; 3736 int i, diff; 3737 3738 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 3739 return; 3740 3741 __skb_queue_head_init(&frames); 3742 3743 sta = container_of(pubsta, struct sta_info, sta); 3744 3745 rx.sta = sta; 3746 rx.sdata = sta->sdata; 3747 rx.local = sta->local; 3748 3749 rcu_read_lock(); 3750 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3751 if (!tid_agg_rx) 3752 goto out; 3753 3754 spin_lock_bh(&tid_agg_rx->reorder_lock); 3755 3756 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 3757 int release; 3758 3759 /* release all frames in the reorder buffer */ 3760 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 3761 IEEE80211_SN_MODULO; 3762 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 3763 release, &frames); 3764 /* update ssn to match received ssn */ 3765 tid_agg_rx->head_seq_num = ssn; 3766 } else { 3767 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 3768 &frames); 3769 } 3770 3771 /* handle the case that received ssn is behind the mac ssn. 
3772 * it can be tid_agg_rx->buf_size behind and still be valid */ 3773 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 3774 if (diff >= tid_agg_rx->buf_size) { 3775 tid_agg_rx->reorder_buf_filtered = 0; 3776 goto release; 3777 } 3778 filtered = filtered >> diff; 3779 ssn += diff; 3780 3781 /* update bitmap */ 3782 for (i = 0; i < tid_agg_rx->buf_size; i++) { 3783 int index = (ssn + i) % tid_agg_rx->buf_size; 3784 3785 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 3786 if (filtered & BIT_ULL(i)) 3787 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 3788 } 3789 3790 /* now process also frames that the filter marking released */ 3791 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3792 3793 release: 3794 spin_unlock_bh(&tid_agg_rx->reorder_lock); 3795 3796 ieee80211_rx_handlers(&rx, &frames); 3797 3798 out: 3799 rcu_read_unlock(); 3800 } 3801 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 3802 3803 /* main receive path */ 3804 3805 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 3806 { 3807 struct ieee80211_sub_if_data *sdata = rx->sdata; 3808 struct sk_buff *skb = rx->skb; 3809 struct ieee80211_hdr *hdr = (void *)skb->data; 3810 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3811 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3812 bool multicast = is_multicast_ether_addr(hdr->addr1); 3813 3814 switch (sdata->vif.type) { 3815 case NL80211_IFTYPE_STATION: 3816 if (!bssid && !sdata->u.mgd.use_4addr) 3817 return false; 3818 if (multicast) 3819 return true; 3820 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3821 case NL80211_IFTYPE_ADHOC: 3822 if (!bssid) 3823 return false; 3824 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3825 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3826 return false; 3827 if (ieee80211_is_beacon(hdr->frame_control)) 3828 return true; 3829 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 3830 return false; 3831 if (!multicast && 3832 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3833 return false; 3834 if (!rx->sta) { 3835 int rate_idx; 3836 if (status->encoding != RX_ENC_LEGACY) 3837 rate_idx = 0; /* TODO: HT/VHT rates */ 3838 else 3839 rate_idx = status->rate_idx; 3840 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 3841 BIT(rate_idx)); 3842 } 3843 return true; 3844 case NL80211_IFTYPE_OCB: 3845 if (!bssid) 3846 return false; 3847 if (!ieee80211_is_data_present(hdr->frame_control)) 3848 return false; 3849 if (!is_broadcast_ether_addr(bssid)) 3850 return false; 3851 if (!multicast && 3852 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 3853 return false; 3854 if (!rx->sta) { 3855 int rate_idx; 3856 if (status->encoding != RX_ENC_LEGACY) 3857 rate_idx = 0; /* TODO: HT rates */ 3858 else 3859 rate_idx = status->rate_idx; 3860 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 3861 BIT(rate_idx)); 3862 } 3863 return true; 3864 case NL80211_IFTYPE_MESH_POINT: 3865 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 3866 return false; 3867 if (multicast) 3868 return true; 3869 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3870 case NL80211_IFTYPE_AP_VLAN: 3871 case NL80211_IFTYPE_AP: 3872 if (!bssid) 3873 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3874 3875 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 3876 /* 3877 * Accept public action frames even when the 3878 * BSSID doesn't match, this is used for P2P 3879 * and location updates. Note that mac80211 3880 * itself never looks at these frames. 
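 * Beacons are accepted here as well; any other frame whose BSSID does
 * not match is rejected.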
3881 */ 3882 if (!multicast && 3883 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3884 return false; 3885 if (ieee80211_is_public_action(hdr, skb->len)) 3886 return true; 3887 return ieee80211_is_beacon(hdr->frame_control); 3888 } 3889 3890 if (!ieee80211_has_tods(hdr->frame_control)) { 3891 /* ignore data frames to TDLS-peers */ 3892 if (ieee80211_is_data(hdr->frame_control)) 3893 return false; 3894 /* ignore action frames to TDLS-peers */ 3895 if (ieee80211_is_action(hdr->frame_control) && 3896 !is_broadcast_ether_addr(bssid) && 3897 !ether_addr_equal(bssid, hdr->addr1)) 3898 return false; 3899 } 3900 3901 /* 3902 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 3903 * the BSSID - we've checked that already but may have accepted 3904 * the wildcard (ff:ff:ff:ff:ff:ff). 3905 * 3906 * It also says: 3907 * The BSSID of the Data frame is determined as follows: 3908 * a) If the STA is contained within an AP or is associated 3909 * with an AP, the BSSID is the address currently in use 3910 * by the STA contained in the AP. 3911 * 3912 * So we should not accept data frames with an address that's 3913 * multicast. 3914 * 3915 * Accepting it also opens a security problem because stations 3916 * could encrypt it with the GTK and inject traffic that way. 3917 */ 3918 if (ieee80211_is_data(hdr->frame_control) && multicast) 3919 return false; 3920 3921 return true; 3922 case NL80211_IFTYPE_WDS: 3923 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3924 return false; 3925 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); 3926 case NL80211_IFTYPE_P2P_DEVICE: 3927 return ieee80211_is_public_action(hdr, skb->len) || 3928 ieee80211_is_probe_req(hdr->frame_control) || 3929 ieee80211_is_probe_resp(hdr->frame_control) || 3930 ieee80211_is_beacon(hdr->frame_control); 3931 case NL80211_IFTYPE_NAN: 3932 /* Currently no frames on NAN interface are allowed */ 3933 return false; 3934 default: 3935 break; 3936 } 3937 3938 WARN_ON_ONCE(1); 3939 return false; 3940 } 3941 3942 void ieee80211_check_fast_rx(struct sta_info *sta) 3943 { 3944 struct ieee80211_sub_if_data *sdata = sta->sdata; 3945 struct ieee80211_local *local = sdata->local; 3946 struct ieee80211_key *key; 3947 struct ieee80211_fast_rx fastrx = { 3948 .dev = sdata->dev, 3949 .vif_type = sdata->vif.type, 3950 .control_port_protocol = sdata->control_port_protocol, 3951 }, *old, *new = NULL; 3952 bool assign = false; 3953 3954 /* use sparse to check that we don't return without updating */ 3955 __acquire(check_fast_rx); 3956 3957 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 3958 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 3959 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 3960 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 3961 3962 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 3963 3964 /* fast-rx doesn't do reordering */ 3965 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 3966 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 3967 goto clear; 3968 3969 switch (sdata->vif.type) { 3970 case NL80211_IFTYPE_STATION: 3971 if (sta->sta.tdls) { 3972 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 3973 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 3974 fastrx.expected_ds_bits = 0; 3975 } else { 3976 fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0; 3977 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 3978 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 3979 fastrx.expected_ds_bits = 3980 
cpu_to_le16(IEEE80211_FCTL_FROMDS); 3981 } 3982 3983 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 3984 fastrx.expected_ds_bits |= 3985 cpu_to_le16(IEEE80211_FCTL_TODS); 3986 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 3987 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 3988 } 3989 3990 if (!sdata->u.mgd.powersave) 3991 break; 3992 3993 /* software powersave is a huge mess, avoid all of it */ 3994 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 3995 goto clear; 3996 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 3997 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 3998 goto clear; 3999 break; 4000 case NL80211_IFTYPE_AP_VLAN: 4001 case NL80211_IFTYPE_AP: 4002 /* parallel-rx requires this, at least with calls to 4003 * ieee80211_sta_ps_transition() 4004 */ 4005 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4006 goto clear; 4007 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4008 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4009 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4010 4011 fastrx.internal_forward = 4012 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4013 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4014 !sdata->u.vlan.sta); 4015 4016 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4017 sdata->u.vlan.sta) { 4018 fastrx.expected_ds_bits |= 4019 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4020 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4021 fastrx.internal_forward = 0; 4022 } 4023 4024 break; 4025 default: 4026 goto clear; 4027 } 4028 4029 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4030 goto clear; 4031 4032 rcu_read_lock(); 4033 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4034 if (key) { 4035 switch (key->conf.cipher) { 4036 case WLAN_CIPHER_SUITE_TKIP: 4037 /* we don't want to deal with MMIC in fast-rx */ 4038 goto clear_rcu; 4039 case WLAN_CIPHER_SUITE_CCMP: 4040 case WLAN_CIPHER_SUITE_CCMP_256: 4041 case WLAN_CIPHER_SUITE_GCMP: 4042 case WLAN_CIPHER_SUITE_GCMP_256: 4043 break; 4044 default: 4045 /* we also don't want to deal with WEP or cipher scheme 4046 * since those require looking up the key idx in the 4047 * frame, rather than assuming the PTK is used 4048 * (we need to revisit this once we implement the real 4049 * PTK index, which is now valid in the spec, but we 4050 * haven't implemented that part yet) 4051 */ 4052 goto clear_rcu; 4053 } 4054 4055 fastrx.key = true; 4056 fastrx.icv_len = key->conf.icv_len; 4057 } 4058 4059 assign = true; 4060 clear_rcu: 4061 rcu_read_unlock(); 4062 clear: 4063 __release(check_fast_rx); 4064 4065 if (assign) 4066 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4067 4068 spin_lock_bh(&sta->lock); 4069 old = rcu_dereference_protected(sta->fast_rx, true); 4070 rcu_assign_pointer(sta->fast_rx, new); 4071 spin_unlock_bh(&sta->lock); 4072 4073 if (old) 4074 kfree_rcu(old, rcu_head); 4075 } 4076 4077 void ieee80211_clear_fast_rx(struct sta_info *sta) 4078 { 4079 struct ieee80211_fast_rx *old; 4080 4081 spin_lock_bh(&sta->lock); 4082 old = rcu_dereference_protected(sta->fast_rx, true); 4083 RCU_INIT_POINTER(sta->fast_rx, NULL); 4084 spin_unlock_bh(&sta->lock); 4085 4086 if (old) 4087 kfree_rcu(old, rcu_head); 4088 } 4089 4090 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4091 { 4092 struct ieee80211_local *local = sdata->local; 4093 struct sta_info *sta; 4094 4095 lockdep_assert_held(&local->sta_mtx); 4096 4097 list_for_each_entry_rcu(sta, &local->sta_list, list) { 4098 if (sdata != sta->sdata && 4099 
(!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4100 continue; 4101 ieee80211_check_fast_rx(sta); 4102 } 4103 } 4104 4105 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4106 { 4107 struct ieee80211_local *local = sdata->local; 4108 4109 mutex_lock(&local->sta_mtx); 4110 __ieee80211_check_fast_rx_iface(sdata); 4111 mutex_unlock(&local->sta_mtx); 4112 } 4113 4114 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4115 struct ieee80211_fast_rx *fast_rx) 4116 { 4117 struct sk_buff *skb = rx->skb; 4118 struct ieee80211_hdr *hdr = (void *)skb->data; 4119 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4120 struct sta_info *sta = rx->sta; 4121 int orig_len = skb->len; 4122 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4123 int snap_offs = hdrlen; 4124 struct { 4125 u8 snap[sizeof(rfc1042_header)]; 4126 __be16 proto; 4127 } *payload __aligned(2); 4128 struct { 4129 u8 da[ETH_ALEN]; 4130 u8 sa[ETH_ALEN]; 4131 } addrs __aligned(2); 4132 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; 4133 4134 if (fast_rx->uses_rss) 4135 stats = this_cpu_ptr(sta->pcpu_rx_stats); 4136 4137 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4138 * to a common data structure; drivers can implement that per queue 4139 * but we don't have that information in mac80211 4140 */ 4141 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4142 return false; 4143 4144 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4145 4146 /* If using encryption, we also need to have: 4147 * - PN_VALIDATED: similar, but the implementation is tricky 4148 * - DECRYPTED: necessary for PN_VALIDATED 4149 */ 4150 if (fast_rx->key && 4151 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4152 return false; 4153 4154 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4155 return false; 4156 4157 if (unlikely(ieee80211_is_frag(hdr))) 4158 return false; 4159 4160 /* Since our interface address cannot be multicast, this 4161 * implicitly also rejects multicast frames without the 4162 * explicit check. 4163 * 4164 * We shouldn't get any *data* frames not addressed to us 4165 * (AP mode will accept multicast *management* frames), but 4166 * punting here will make it go through the full checks in 4167 * ieee80211_accept_frame(). 4168 */ 4169 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4170 return false; 4171 4172 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4173 IEEE80211_FCTL_TODS)) != 4174 fast_rx->expected_ds_bits) 4175 return false; 4176 4177 /* assign the key to drop unencrypted frames (later) 4178 * and strip the IV/MIC if necessary 4179 */ 4180 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4181 /* GCMP header length is the same */ 4182 snap_offs += IEEE80211_CCMP_HDR_LEN; 4183 } 4184 4185 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { 4186 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4187 goto drop; 4188 4189 payload = (void *)(skb->data + snap_offs); 4190 4191 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 4192 return false; 4193 4194 /* Don't handle these here since they require special code. 4195 * Accept AARP and IPX even though they should come with a 4196 * bridge-tunnel header - but if we get them this way then 4197 * there's little point in discarding them. 
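 * TDLS frames and the configured control port protocol (typically EAPOL)
 * are also left to the slow path: returning false here punts the frame
 * back to the full set of RX handlers.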
4198 */ 4199 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 4200 payload->proto == fast_rx->control_port_protocol)) 4201 return false; 4202 } 4203 4204 /* after this point, don't punt to the slowpath! */ 4205 4206 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 4207 pskb_trim(skb, skb->len - fast_rx->icv_len)) 4208 goto drop; 4209 4210 if (unlikely(fast_rx->sta_notify)) { 4211 ieee80211_sta_rx_notify(rx->sdata, hdr); 4212 fast_rx->sta_notify = false; 4213 } 4214 4215 /* statistics part of ieee80211_rx_h_sta_process() */ 4216 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4217 stats->last_signal = status->signal; 4218 if (!fast_rx->uses_rss) 4219 ewma_signal_add(&sta->rx_stats_avg.signal, 4220 -status->signal); 4221 } 4222 4223 if (status->chains) { 4224 int i; 4225 4226 stats->chains = status->chains; 4227 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4228 int signal = status->chain_signal[i]; 4229 4230 if (!(status->chains & BIT(i))) 4231 continue; 4232 4233 stats->chain_signal_last[i] = signal; 4234 if (!fast_rx->uses_rss) 4235 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], 4236 -signal); 4237 } 4238 } 4239 /* end of statistics */ 4240 4241 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 4242 goto drop; 4243 4244 if (status->rx_flags & IEEE80211_RX_AMSDU) { 4245 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 4246 RX_QUEUED) 4247 goto drop; 4248 4249 return true; 4250 } 4251 4252 stats->last_rx = jiffies; 4253 stats->last_rate = sta_stats_encode_rate(status); 4254 4255 stats->fragments++; 4256 stats->packets++; 4257 4258 /* do the header conversion - first grab the addresses */ 4259 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 4260 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 4261 /* remove the SNAP but leave the ethertype */ 4262 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 4263 /* push the addresses in front */ 4264 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 4265 4266 skb->dev = fast_rx->dev; 4267 4268 ieee80211_rx_stats(fast_rx->dev, skb->len); 4269 4270 /* The seqno index has the same property as needed 4271 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4272 * for non-QoS-data frames. Here we know it's a data 4273 * frame, so count MSDUs. 4274 */ 4275 u64_stats_update_begin(&stats->syncp); 4276 stats->msdu[rx->seqno_idx]++; 4277 stats->bytes += orig_len; 4278 u64_stats_update_end(&stats->syncp); 4279 4280 if (fast_rx->internal_forward) { 4281 struct sk_buff *xmit_skb = NULL; 4282 if (is_multicast_ether_addr(addrs.da)) { 4283 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4284 } else if (!ether_addr_equal(addrs.da, addrs.sa) && 4285 sta_info_get(rx->sdata, addrs.da)) { 4286 xmit_skb = skb; 4287 skb = NULL; 4288 } 4289 4290 if (xmit_skb) { 4291 /* 4292 * Send to wireless media and increase priority by 256 4293 * to keep the received priority instead of 4294 * reclassifying the frame (see cfg80211_classify8021d). 
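 * A multicast destination gets a copy so the local stack still receives
 * the frame; a unicast frame addressed to a known peer is consumed here
 * and only retransmitted.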
4295 */ 4296 xmit_skb->priority += 256; 4297 xmit_skb->protocol = htons(ETH_P_802_3); 4298 skb_reset_network_header(xmit_skb); 4299 skb_reset_mac_header(xmit_skb); 4300 dev_queue_xmit(xmit_skb); 4301 } 4302 4303 if (!skb) 4304 return true; 4305 } 4306 4307 /* deliver to local stack */ 4308 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4309 memset(skb->cb, 0, sizeof(skb->cb)); 4310 if (rx->napi) 4311 napi_gro_receive(rx->napi, skb); 4312 else 4313 netif_receive_skb(skb); 4314 4315 return true; 4316 drop: 4317 dev_kfree_skb(skb); 4318 stats->dropped++; 4319 return true; 4320 } 4321 4322 /* 4323 * This function returns whether or not the SKB 4324 * was destined for RX processing or not, which, 4325 * if consume is true, is equivalent to whether 4326 * or not the skb was consumed. 4327 */ 4328 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 4329 struct sk_buff *skb, bool consume) 4330 { 4331 struct ieee80211_local *local = rx->local; 4332 struct ieee80211_sub_if_data *sdata = rx->sdata; 4333 4334 rx->skb = skb; 4335 4336 /* See if we can do fast-rx; if we have to copy we already lost, 4337 * so punt in that case. We should never have to deliver a data 4338 * frame to multiple interfaces anyway. 4339 * 4340 * We skip the ieee80211_accept_frame() call and do the necessary 4341 * checking inside ieee80211_invoke_fast_rx(). 4342 */ 4343 if (consume && rx->sta) { 4344 struct ieee80211_fast_rx *fast_rx; 4345 4346 fast_rx = rcu_dereference(rx->sta->fast_rx); 4347 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 4348 return true; 4349 } 4350 4351 if (!ieee80211_accept_frame(rx)) 4352 return false; 4353 4354 if (!consume) { 4355 skb = skb_copy(skb, GFP_ATOMIC); 4356 if (!skb) { 4357 if (net_ratelimit()) 4358 wiphy_debug(local->hw.wiphy, 4359 "failed to copy skb for %s\n", 4360 sdata->name); 4361 return true; 4362 } 4363 4364 rx->skb = skb; 4365 } 4366 4367 ieee80211_invoke_rx_handlers(rx); 4368 return true; 4369 } 4370 4371 /* 4372 * This is the actual Rx frames handler. as it belongs to Rx path it must 4373 * be called with rcu_read_lock protection. 
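 * It validates the header, classifies the frame by station and interface,
 * and runs ieee80211_prepare_and_rx_handle() for every candidate, copying
 * the skb for all but the last one.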
4374 */ 4375 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 4376 struct ieee80211_sta *pubsta, 4377 struct sk_buff *skb, 4378 struct napi_struct *napi) 4379 { 4380 struct ieee80211_local *local = hw_to_local(hw); 4381 struct ieee80211_sub_if_data *sdata; 4382 struct ieee80211_hdr *hdr; 4383 __le16 fc; 4384 struct ieee80211_rx_data rx; 4385 struct ieee80211_sub_if_data *prev; 4386 struct rhlist_head *tmp; 4387 int err = 0; 4388 4389 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 4390 memset(&rx, 0, sizeof(rx)); 4391 rx.skb = skb; 4392 rx.local = local; 4393 rx.napi = napi; 4394 4395 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 4396 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 4397 4398 if (ieee80211_is_mgmt(fc)) { 4399 /* drop frame if too short for header */ 4400 if (skb->len < ieee80211_hdrlen(fc)) 4401 err = -ENOBUFS; 4402 else 4403 err = skb_linearize(skb); 4404 } else { 4405 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 4406 } 4407 4408 if (err) { 4409 dev_kfree_skb(skb); 4410 return; 4411 } 4412 4413 hdr = (struct ieee80211_hdr *)skb->data; 4414 ieee80211_parse_qos(&rx); 4415 ieee80211_verify_alignment(&rx); 4416 4417 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 4418 ieee80211_is_beacon(hdr->frame_control))) 4419 ieee80211_scan_rx(local, skb); 4420 4421 if (ieee80211_is_data(fc)) { 4422 struct sta_info *sta, *prev_sta; 4423 4424 if (pubsta) { 4425 rx.sta = container_of(pubsta, struct sta_info, sta); 4426 rx.sdata = rx.sta->sdata; 4427 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4428 return; 4429 goto out; 4430 } 4431 4432 prev_sta = NULL; 4433 4434 for_each_sta_info(local, hdr->addr2, sta, tmp) { 4435 if (!prev_sta) { 4436 prev_sta = sta; 4437 continue; 4438 } 4439 4440 rx.sta = prev_sta; 4441 rx.sdata = prev_sta->sdata; 4442 ieee80211_prepare_and_rx_handle(&rx, skb, false); 4443 4444 prev_sta = sta; 4445 } 4446 4447 if (prev_sta) { 4448 rx.sta = prev_sta; 4449 rx.sdata = prev_sta->sdata; 4450 4451 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4452 return; 4453 goto out; 4454 } 4455 } 4456 4457 prev = NULL; 4458 4459 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 4460 if (!ieee80211_sdata_running(sdata)) 4461 continue; 4462 4463 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 4464 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 4465 continue; 4466 4467 /* 4468 * frame is destined for this interface, but if it's 4469 * not also for the previous one we handle that after 4470 * the loop to avoid copying the SKB once too much 4471 */ 4472 4473 if (!prev) { 4474 prev = sdata; 4475 continue; 4476 } 4477 4478 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4479 rx.sdata = prev; 4480 ieee80211_prepare_and_rx_handle(&rx, skb, false); 4481 4482 prev = sdata; 4483 } 4484 4485 if (prev) { 4486 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4487 rx.sdata = prev; 4488 4489 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4490 return; 4491 } 4492 4493 out: 4494 dev_kfree_skb(skb); 4495 } 4496 4497 /* 4498 * This is the receive path handler. It is called by a low level driver when an 4499 * 802.11 MPDU is received from the hardware. 
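 * It validates the rate information reported by the driver, hands the frame
 * to any monitor interfaces via ieee80211_rx_monitor() and then dispatches
 * it under rcu_read_lock() through __ieee80211_rx_handle_packet().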
4500 */ 4501 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4502 struct sk_buff *skb, struct napi_struct *napi) 4503 { 4504 struct ieee80211_local *local = hw_to_local(hw); 4505 struct ieee80211_rate *rate = NULL; 4506 struct ieee80211_supported_band *sband; 4507 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4508 4509 WARN_ON_ONCE(softirq_count() == 0); 4510 4511 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 4512 goto drop; 4513 4514 sband = local->hw.wiphy->bands[status->band]; 4515 if (WARN_ON(!sband)) 4516 goto drop; 4517 4518 /* 4519 * If we're suspending, it is possible although not too likely 4520 * that we'd be receiving frames after having already partially 4521 * quiesced the stack. We can't process such frames then since 4522 * that might, for example, cause stations to be added or other 4523 * driver callbacks be invoked. 4524 */ 4525 if (unlikely(local->quiescing || local->suspended)) 4526 goto drop; 4527 4528 /* We might be during a HW reconfig, prevent Rx for the same reason */ 4529 if (unlikely(local->in_reconfig)) 4530 goto drop; 4531 4532 /* 4533 * The same happens when we're not even started, 4534 * but that's worth a warning. 4535 */ 4536 if (WARN_ON(!local->started)) 4537 goto drop; 4538 4539 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 4540 /* 4541 * Validate the rate, unless a PLCP error means that 4542 * we probably can't have a valid rate here anyway. 4543 */ 4544 4545 switch (status->encoding) { 4546 case RX_ENC_HT: 4547 /* 4548 * rate_idx is MCS index, which can be [0-76] 4549 * as documented on: 4550 * 4551 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 4552 * 4553 * Anything else would be some sort of driver or 4554 * hardware error. The driver should catch hardware 4555 * errors. 4556 */ 4557 if (WARN(status->rate_idx > 76, 4558 "Rate marked as an HT rate but passed " 4559 "status->rate_idx is not " 4560 "an MCS index [0-76]: %d (0x%02x)\n", 4561 status->rate_idx, 4562 status->rate_idx)) 4563 goto drop; 4564 break; 4565 case RX_ENC_VHT: 4566 if (WARN_ONCE(status->rate_idx > 9 || 4567 !status->nss || 4568 status->nss > 8, 4569 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 4570 status->rate_idx, status->nss)) 4571 goto drop; 4572 break; 4573 case RX_ENC_HE: 4574 if (WARN_ONCE(status->rate_idx > 11 || 4575 !status->nss || 4576 status->nss > 8, 4577 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", 4578 status->rate_idx, status->nss)) 4579 goto drop; 4580 break; 4581 default: 4582 WARN_ON_ONCE(1); 4583 /* fall through */ 4584 case RX_ENC_LEGACY: 4585 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 4586 goto drop; 4587 rate = &sband->bitrates[status->rate_idx]; 4588 } 4589 } 4590 4591 status->rx_flags = 0; 4592 4593 /* 4594 * key references and virtual interfaces are protected using RCU 4595 * and this requires that we are in a read-side RCU section during 4596 * receive processing 4597 */ 4598 rcu_read_lock(); 4599 4600 /* 4601 * Frames with failed FCS/PLCP checksum are not returned, 4602 * all other frames are returned without radiotap header 4603 * if it was previously present. 4604 * Also, frames with less than 16 bytes are dropped. 
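 * ieee80211_rx_monitor() returns NULL if the frame was consumed or dropped
 * there and must not be processed any further.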
4605 */ 4606 skb = ieee80211_rx_monitor(local, skb, rate); 4607 if (!skb) { 4608 rcu_read_unlock(); 4609 return; 4610 } 4611 4612 ieee80211_tpt_led_trig_rx(local, 4613 ((struct ieee80211_hdr *)skb->data)->frame_control, 4614 skb->len); 4615 4616 __ieee80211_rx_handle_packet(hw, pubsta, skb, napi); 4617 4618 rcu_read_unlock(); 4619 4620 return; 4621 drop: 4622 kfree_skb(skb); 4623 } 4624 EXPORT_SYMBOL(ieee80211_rx_napi); 4625 4626 /* This is a version of the rx handler that can be called from hard irq 4627 * context. Post the skb on the queue and schedule the tasklet */ 4628 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 4629 { 4630 struct ieee80211_local *local = hw_to_local(hw); 4631 4632 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 4633 4634 skb->pkt_type = IEEE80211_RX_MSG; 4635 skb_queue_tail(&local->skb_queue, skb); 4636 tasklet_schedule(&local->tasklet); 4637 } 4638 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 4639
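/*
 * Illustrative sketch (not part of mac80211 itself): how a low-level
 * driver's hard-IRQ RX handler might hand a received MPDU to this file
 * via ieee80211_rx_irqsafe().  The driver-side names (mydrv_priv,
 * mydrv_dequeue_rx_skb, mydrv_fill_rx_status) are hypothetical.
 *
 *	static irqreturn_t mydrv_rx_interrupt(int irq, void *data)
 *	{
 *		struct mydrv_priv *priv = data;
 *		struct sk_buff *skb;
 *
 *		skb = mydrv_dequeue_rx_skb(priv);
 *		if (!skb)
 *			return IRQ_NONE;
 *
 *		mydrv_fill_rx_status(priv, skb, IEEE80211_SKB_RXCB(skb));
 *
 *		ieee80211_rx_irqsafe(priv->hw, skb);
 *		return IRQ_HANDLED;
 *	}
 *
 * mydrv_fill_rx_status() stands for whatever fills band, frequency, signal
 * and rate into the struct ieee80211_rx_status in skb->cb.  A driver that
 * receives in softirq/NAPI context would call ieee80211_rx_napi() (or
 * ieee80211_rx()) directly instead of going through the tasklet.
 */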