1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2005-2006, Devicescape Software, Inc. 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright (C) 2018-2020 Intel Corporation 10 */ 11 12 #include <linux/jiffies.h> 13 #include <linux/slab.h> 14 #include <linux/kernel.h> 15 #include <linux/skbuff.h> 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/rcupdate.h> 19 #include <linux/export.h> 20 #include <linux/bitops.h> 21 #include <net/mac80211.h> 22 #include <net/ieee80211_radiotap.h> 23 #include <asm/unaligned.h> 24 25 #include "ieee80211_i.h" 26 #include "driver-ops.h" 27 #include "led.h" 28 #include "mesh.h" 29 #include "wep.h" 30 #include "wpa.h" 31 #include "tkip.h" 32 #include "wme.h" 33 #include "rate.h" 34 35 static inline void ieee80211_rx_stats(struct net_device *dev, u32 len) 36 { 37 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 38 39 u64_stats_update_begin(&tstats->syncp); 40 tstats->rx_packets++; 41 tstats->rx_bytes += len; 42 u64_stats_update_end(&tstats->syncp); 43 } 44 45 /* 46 * monitor mode reception 47 * 48 * This function cleans up the SKB, i.e. it removes all the stuff 49 * only useful for monitoring. 50 */ 51 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, 52 unsigned int present_fcs_len, 53 unsigned int rtap_space) 54 { 55 struct ieee80211_hdr *hdr; 56 unsigned int hdrlen; 57 __le16 fc; 58 59 if (present_fcs_len) 60 __pskb_trim(skb, skb->len - present_fcs_len); 61 __pskb_pull(skb, rtap_space); 62 63 hdr = (void *)skb->data; 64 fc = hdr->frame_control; 65 66 /* 67 * Remove the HT-Control field (if present) on management 68 * frames after we've sent the frame to monitoring. We 69 * (currently) don't need it, and don't properly parse 70 * frames with it present, due to the assumption of a 71 * fixed management header length. 
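	 *
	 * For reference, a sketch of the layout being stripped (not extra
	 * logic in this path): a management frame with the Order bit set
	 * carries
	 *
	 *	FC(2) Dur(2) A1(6) A2(6) A3(6) SeqCtl(2) HT-Control(4)
	 *
	 * so ieee80211_hdrlen() returns 28 rather than 24 here, and the
	 * memmove()/__pskb_pull() below shift the fixed 24-byte header
	 * forward over the 4-byte IEEE80211_HT_CTL_LEN field.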
72 */ 73 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc))) 74 return skb; 75 76 hdrlen = ieee80211_hdrlen(fc); 77 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); 78 79 if (!pskb_may_pull(skb, hdrlen)) { 80 dev_kfree_skb(skb); 81 return NULL; 82 } 83 84 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data, 85 hdrlen - IEEE80211_HT_CTL_LEN); 86 __pskb_pull(skb, IEEE80211_HT_CTL_LEN); 87 88 return skb; 89 } 90 91 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, 92 unsigned int rtap_space) 93 { 94 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 95 struct ieee80211_hdr *hdr; 96 97 hdr = (void *)(skb->data + rtap_space); 98 99 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 100 RX_FLAG_FAILED_PLCP_CRC | 101 RX_FLAG_ONLY_MONITOR | 102 RX_FLAG_NO_PSDU)) 103 return true; 104 105 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) 106 return true; 107 108 if (ieee80211_is_ctl(hdr->frame_control) && 109 !ieee80211_is_pspoll(hdr->frame_control) && 110 !ieee80211_is_back_req(hdr->frame_control)) 111 return true; 112 113 return false; 114 } 115 116 static int 117 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, 118 struct ieee80211_rx_status *status, 119 struct sk_buff *skb) 120 { 121 int len; 122 123 /* always present fields */ 124 len = sizeof(struct ieee80211_radiotap_header) + 8; 125 126 /* allocate extra bitmaps */ 127 if (status->chains) 128 len += 4 * hweight8(status->chains); 129 /* vendor presence bitmap */ 130 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) 131 len += 4; 132 133 if (ieee80211_have_rx_timestamp(status)) { 134 len = ALIGN(len, 8); 135 len += 8; 136 } 137 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) 138 len += 1; 139 140 /* antenna field, if we don't have per-chain info */ 141 if (!status->chains) 142 len += 1; 143 144 /* padding for RX_FLAGS if necessary */ 145 len = ALIGN(len, 2); 146 147 if (status->encoding == RX_ENC_HT) /* HT info */ 148 len += 3; 149 150 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 151 len = ALIGN(len, 4); 152 len += 8; 153 } 154 155 if (status->encoding == RX_ENC_VHT) { 156 len = ALIGN(len, 2); 157 len += 12; 158 } 159 160 if (local->hw.radiotap_timestamp.units_pos >= 0) { 161 len = ALIGN(len, 8); 162 len += 12; 163 } 164 165 if (status->encoding == RX_ENC_HE && 166 status->flag & RX_FLAG_RADIOTAP_HE) { 167 len = ALIGN(len, 2); 168 len += 12; 169 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); 170 } 171 172 if (status->encoding == RX_ENC_HE && 173 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 174 len = ALIGN(len, 2); 175 len += 12; 176 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); 177 } 178 179 if (status->flag & RX_FLAG_NO_PSDU) 180 len += 1; 181 182 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 183 len = ALIGN(len, 2); 184 len += 4; 185 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); 186 } 187 188 if (status->chains) { 189 /* antenna and antenna signal fields */ 190 len += 2 * hweight8(status->chains); 191 } 192 193 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 194 struct ieee80211_vendor_radiotap *rtap; 195 int vendor_data_offset = 0; 196 197 /* 198 * The position to look at depends on the existence (or non- 199 * existence) of other elements, so take that into account... 
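		 *
		 * For example (illustrative only): with RX_FLAG_RADIOTAP_HE
		 * and RX_FLAG_RADIOTAP_LSIG set but no HE-MU data, the
		 * vendor element starts at
		 *
		 *	sizeof(he) + sizeof(lsig) == 12 + 4 == 16
		 *
		 * bytes into skb->data, exactly as computed below.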
200 */ 201 if (status->flag & RX_FLAG_RADIOTAP_HE) 202 vendor_data_offset += 203 sizeof(struct ieee80211_radiotap_he); 204 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 205 vendor_data_offset += 206 sizeof(struct ieee80211_radiotap_he_mu); 207 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 208 vendor_data_offset += 209 sizeof(struct ieee80211_radiotap_lsig); 210 211 rtap = (void *)&skb->data[vendor_data_offset]; 212 213 /* alignment for fixed 6-byte vendor data header */ 214 len = ALIGN(len, 2); 215 /* vendor data header */ 216 len += 6; 217 if (WARN_ON(rtap->align == 0)) 218 rtap->align = 1; 219 len = ALIGN(len, rtap->align); 220 len += rtap->len + rtap->pad; 221 } 222 223 return len; 224 } 225 226 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 227 struct sk_buff *skb, 228 int rtap_space) 229 { 230 struct { 231 struct ieee80211_hdr_3addr hdr; 232 u8 category; 233 u8 action_code; 234 } __packed __aligned(2) action; 235 236 if (!sdata) 237 return; 238 239 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 240 241 if (skb->len < rtap_space + sizeof(action) + 242 VHT_MUMIMO_GROUPS_DATA_LEN) 243 return; 244 245 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 246 return; 247 248 skb_copy_bits(skb, rtap_space, &action, sizeof(action)); 249 250 if (!ieee80211_is_action(action.hdr.frame_control)) 251 return; 252 253 if (action.category != WLAN_CATEGORY_VHT) 254 return; 255 256 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 257 return; 258 259 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 260 return; 261 262 skb = skb_copy(skb, GFP_ATOMIC); 263 if (!skb) 264 return; 265 266 skb_queue_tail(&sdata->skb_queue, skb); 267 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 268 } 269 270 /* 271 * ieee80211_add_rx_radiotap_header - add radiotap header 272 * 273 * add a radiotap header containing all the fields which the hardware provided. 
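 *
 * A sketch of what this produces (assuming no vendor namespace):
 * it_version = 0, it_len = rtap_len, it_present always carries
 * FLAGS | CHANNEL | RX_FLAGS, plus whatever the rx_status flags below
 * enable (TSFT, RATE, MCS, VHT, HE, ...), followed by the field data
 * in radiotap order.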
274 */ 275 static void 276 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 277 struct sk_buff *skb, 278 struct ieee80211_rate *rate, 279 int rtap_len, bool has_fcs) 280 { 281 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 282 struct ieee80211_radiotap_header *rthdr; 283 unsigned char *pos; 284 __le32 *it_present; 285 u32 it_present_val; 286 u16 rx_flags = 0; 287 u16 channel_flags = 0; 288 int mpdulen, chain; 289 unsigned long chains = status->chains; 290 struct ieee80211_vendor_radiotap rtap = {}; 291 struct ieee80211_radiotap_he he = {}; 292 struct ieee80211_radiotap_he_mu he_mu = {}; 293 struct ieee80211_radiotap_lsig lsig = {}; 294 295 if (status->flag & RX_FLAG_RADIOTAP_HE) { 296 he = *(struct ieee80211_radiotap_he *)skb->data; 297 skb_pull(skb, sizeof(he)); 298 WARN_ON_ONCE(status->encoding != RX_ENC_HE); 299 } 300 301 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { 302 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; 303 skb_pull(skb, sizeof(he_mu)); 304 } 305 306 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 307 lsig = *(struct ieee80211_radiotap_lsig *)skb->data; 308 skb_pull(skb, sizeof(lsig)); 309 } 310 311 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 312 rtap = *(struct ieee80211_vendor_radiotap *)skb->data; 313 /* rtap.len and rtap.pad are undone immediately */ 314 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad); 315 } 316 317 mpdulen = skb->len; 318 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))) 319 mpdulen += FCS_LEN; 320 321 rthdr = skb_push(skb, rtap_len); 322 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad); 323 it_present = &rthdr->it_present; 324 325 /* radiotap header, set always present flags */ 326 rthdr->it_len = cpu_to_le16(rtap_len); 327 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | 328 BIT(IEEE80211_RADIOTAP_CHANNEL) | 329 BIT(IEEE80211_RADIOTAP_RX_FLAGS); 330 331 if (!status->chains) 332 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); 333 334 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 335 it_present_val |= 336 BIT(IEEE80211_RADIOTAP_EXT) | 337 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); 338 put_unaligned_le32(it_present_val, it_present); 339 it_present++; 340 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | 341 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 342 } 343 344 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 345 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | 346 BIT(IEEE80211_RADIOTAP_EXT); 347 put_unaligned_le32(it_present_val, it_present); 348 it_present++; 349 it_present_val = rtap.present; 350 } 351 352 put_unaligned_le32(it_present_val, it_present); 353 354 pos = (void *)(it_present + 1); 355 356 /* the order of the following fields is important */ 357 358 /* IEEE80211_RADIOTAP_TSFT */ 359 if (ieee80211_have_rx_timestamp(status)) { 360 /* padding */ 361 while ((pos - (u8 *)rthdr) & 7) 362 *pos++ = 0; 363 put_unaligned_le64( 364 ieee80211_calculate_rx_timestamp(local, status, 365 mpdulen, 0), 366 pos); 367 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 368 pos += 8; 369 } 370 371 /* IEEE80211_RADIOTAP_FLAGS */ 372 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) 373 *pos |= IEEE80211_RADIOTAP_F_FCS; 374 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 375 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 376 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) 377 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 378 pos++; 379 380 /* IEEE80211_RADIOTAP_RATE */ 381 if (!rate || status->encoding != RX_ENC_LEGACY) { 382 /* 383 * Without 
rate information don't add it. If we have, 384 * MCS information is a separate field in radiotap, 385 * added below. The byte here is needed as padding 386 * for the channel though, so initialise it to 0. 387 */ 388 *pos = 0; 389 } else { 390 int shift = 0; 391 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 392 if (status->bw == RATE_INFO_BW_10) 393 shift = 1; 394 else if (status->bw == RATE_INFO_BW_5) 395 shift = 2; 396 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); 397 } 398 pos++; 399 400 /* IEEE80211_RADIOTAP_CHANNEL */ 401 /* TODO: frequency offset in KHz */ 402 put_unaligned_le16(status->freq, pos); 403 pos += 2; 404 if (status->bw == RATE_INFO_BW_10) 405 channel_flags |= IEEE80211_CHAN_HALF; 406 else if (status->bw == RATE_INFO_BW_5) 407 channel_flags |= IEEE80211_CHAN_QUARTER; 408 409 if (status->band == NL80211_BAND_5GHZ || 410 status->band == NL80211_BAND_6GHZ) 411 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; 412 else if (status->encoding != RX_ENC_LEGACY) 413 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 414 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 415 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; 416 else if (rate) 417 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; 418 else 419 channel_flags |= IEEE80211_CHAN_2GHZ; 420 put_unaligned_le16(channel_flags, pos); 421 pos += 2; 422 423 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 424 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && 425 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 426 *pos = status->signal; 427 rthdr->it_present |= 428 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 429 pos++; 430 } 431 432 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 433 434 if (!status->chains) { 435 /* IEEE80211_RADIOTAP_ANTENNA */ 436 *pos = status->antenna; 437 pos++; 438 } 439 440 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 441 442 /* IEEE80211_RADIOTAP_RX_FLAGS */ 443 /* ensure 2 byte alignment for the 2 byte field as required */ 444 if ((pos - (u8 *)rthdr) & 1) 445 *pos++ = 0; 446 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 447 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 448 put_unaligned_le16(rx_flags, pos); 449 pos += 2; 450 451 if (status->encoding == RX_ENC_HT) { 452 unsigned int stbc; 453 454 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 455 *pos++ = local->hw.radiotap_mcs_details; 456 *pos = 0; 457 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 458 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 459 if (status->bw == RATE_INFO_BW_40) 460 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 461 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 462 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; 463 if (status->enc_flags & RX_ENC_FLAG_LDPC) 464 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; 465 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; 466 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; 467 pos++; 468 *pos++ = status->rate_idx; 469 } 470 471 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 472 u16 flags = 0; 473 474 /* ensure 4 byte alignment */ 475 while ((pos - (u8 *)rthdr) & 3) 476 pos++; 477 rthdr->it_present |= 478 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); 479 put_unaligned_le32(status->ampdu_reference, pos); 480 pos += 4; 481 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) 482 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; 483 if (status->flag & RX_FLAG_AMPDU_IS_LAST) 484 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; 485 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) 486 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; 487 if 
(status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 488 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; 489 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) 490 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; 491 if (status->flag & RX_FLAG_AMPDU_EOF_BIT) 492 flags |= IEEE80211_RADIOTAP_AMPDU_EOF; 493 put_unaligned_le16(flags, pos); 494 pos += 2; 495 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 496 *pos++ = status->ampdu_delimiter_crc; 497 else 498 *pos++ = 0; 499 *pos++ = 0; 500 } 501 502 if (status->encoding == RX_ENC_VHT) { 503 u16 known = local->hw.radiotap_vht_details; 504 505 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); 506 put_unaligned_le16(known, pos); 507 pos += 2; 508 /* flags */ 509 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 510 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; 511 /* in VHT, STBC is binary */ 512 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) 513 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 514 if (status->enc_flags & RX_ENC_FLAG_BF) 515 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; 516 pos++; 517 /* bandwidth */ 518 switch (status->bw) { 519 case RATE_INFO_BW_80: 520 *pos++ = 4; 521 break; 522 case RATE_INFO_BW_160: 523 *pos++ = 11; 524 break; 525 case RATE_INFO_BW_40: 526 *pos++ = 1; 527 break; 528 default: 529 *pos++ = 0; 530 } 531 /* MCS/NSS */ 532 *pos = (status->rate_idx << 4) | status->nss; 533 pos += 4; 534 /* coding field */ 535 if (status->enc_flags & RX_ENC_FLAG_LDPC) 536 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; 537 pos++; 538 /* group ID */ 539 pos++; 540 /* partial_aid */ 541 pos += 2; 542 } 543 544 if (local->hw.radiotap_timestamp.units_pos >= 0) { 545 u16 accuracy = 0; 546 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; 547 548 rthdr->it_present |= 549 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP); 550 551 /* ensure 8 byte alignment */ 552 while ((pos - (u8 *)rthdr) & 7) 553 pos++; 554 555 put_unaligned_le64(status->device_timestamp, pos); 556 pos += sizeof(u64); 557 558 if (local->hw.radiotap_timestamp.accuracy >= 0) { 559 accuracy = local->hw.radiotap_timestamp.accuracy; 560 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; 561 } 562 put_unaligned_le16(accuracy, pos); 563 pos += sizeof(u16); 564 565 *pos++ = local->hw.radiotap_timestamp.units_pos; 566 *pos++ = flags; 567 } 568 569 if (status->encoding == RX_ENC_HE && 570 status->flag & RX_FLAG_RADIOTAP_HE) { 571 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) 572 573 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { 574 he.data6 |= HE_PREP(DATA6_NSTS, 575 FIELD_GET(RX_ENC_FLAG_STBC_MASK, 576 status->enc_flags)); 577 he.data3 |= HE_PREP(DATA3_STBC, 1); 578 } else { 579 he.data6 |= HE_PREP(DATA6_NSTS, status->nss); 580 } 581 582 #define CHECK_GI(s) \ 583 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ 584 (int)NL80211_RATE_INFO_HE_GI_##s) 585 586 CHECK_GI(0_8); 587 CHECK_GI(1_6); 588 CHECK_GI(3_2); 589 590 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); 591 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); 592 he.data3 |= HE_PREP(DATA3_CODING, 593 !!(status->enc_flags & RX_ENC_FLAG_LDPC)); 594 595 he.data5 |= HE_PREP(DATA5_GI, status->he_gi); 596 597 switch (status->bw) { 598 case RATE_INFO_BW_20: 599 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 600 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); 601 break; 602 case RATE_INFO_BW_40: 603 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 604 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); 605 break; 606 case RATE_INFO_BW_80: 607 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 608 
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); 609 break; 610 case RATE_INFO_BW_160: 611 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 612 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); 613 break; 614 case RATE_INFO_BW_HE_RU: 615 #define CHECK_RU_ALLOC(s) \ 616 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ 617 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) 618 619 CHECK_RU_ALLOC(26); 620 CHECK_RU_ALLOC(52); 621 CHECK_RU_ALLOC(106); 622 CHECK_RU_ALLOC(242); 623 CHECK_RU_ALLOC(484); 624 CHECK_RU_ALLOC(996); 625 CHECK_RU_ALLOC(2x996); 626 627 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 628 status->he_ru + 4); 629 break; 630 default: 631 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); 632 } 633 634 /* ensure 2 byte alignment */ 635 while ((pos - (u8 *)rthdr) & 1) 636 pos++; 637 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); 638 memcpy(pos, &he, sizeof(he)); 639 pos += sizeof(he); 640 } 641 642 if (status->encoding == RX_ENC_HE && 643 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 644 /* ensure 2 byte alignment */ 645 while ((pos - (u8 *)rthdr) & 1) 646 pos++; 647 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); 648 memcpy(pos, &he_mu, sizeof(he_mu)); 649 pos += sizeof(he_mu); 650 } 651 652 if (status->flag & RX_FLAG_NO_PSDU) { 653 rthdr->it_present |= 654 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU); 655 *pos++ = status->zero_length_psdu_type; 656 } 657 658 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 659 /* ensure 2 byte alignment */ 660 while ((pos - (u8 *)rthdr) & 1) 661 pos++; 662 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG); 663 memcpy(pos, &lsig, sizeof(lsig)); 664 pos += sizeof(lsig); 665 } 666 667 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 668 *pos++ = status->chain_signal[chain]; 669 *pos++ = chain; 670 } 671 672 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 673 /* ensure 2 byte alignment for the vendor field as required */ 674 if ((pos - (u8 *)rthdr) & 1) 675 *pos++ = 0; 676 *pos++ = rtap.oui[0]; 677 *pos++ = rtap.oui[1]; 678 *pos++ = rtap.oui[2]; 679 *pos++ = rtap.subns; 680 put_unaligned_le16(rtap.len, pos); 681 pos += 2; 682 /* align the actual payload as requested */ 683 while ((pos - (u8 *)rthdr) & (rtap.align - 1)) 684 *pos++ = 0; 685 /* data (and possible padding) already follows */ 686 } 687 } 688 689 static struct sk_buff * 690 ieee80211_make_monitor_skb(struct ieee80211_local *local, 691 struct sk_buff **origskb, 692 struct ieee80211_rate *rate, 693 int rtap_space, bool use_origskb) 694 { 695 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); 696 int rt_hdrlen, needed_headroom; 697 struct sk_buff *skb; 698 699 /* room for the radiotap header based on driver features */ 700 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); 701 needed_headroom = rt_hdrlen - rtap_space; 702 703 if (use_origskb) { 704 /* only need to expand headroom if necessary */ 705 skb = *origskb; 706 *origskb = NULL; 707 708 /* 709 * This shouldn't trigger often because most devices have an 710 * RX header they pull before we get here, and that should 711 * be big enough for our radiotap information. We should 712 * probably export the length to drivers so that we can have 713 * them allocate enough headroom to start with. 
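		 *
		 * In other words, a driver can usually avoid the
		 * pskb_expand_head() below by reserving spare headroom when
		 * it builds its RX skbs, along the lines of (a sketch only,
		 * the 64 bytes is a guess rather than an exported value):
		 *
		 *	skb = dev_alloc_skb(frame_len + 64);
		 *	skb_reserve(skb, 64);
		 *	... fill frame data and rx_status, ieee80211_rx(hw, skb);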
714 */ 715 if (skb_headroom(skb) < needed_headroom && 716 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 717 dev_kfree_skb(skb); 718 return NULL; 719 } 720 } else { 721 /* 722 * Need to make a copy and possibly remove radiotap header 723 * and FCS from the original. 724 */ 725 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); 726 727 if (!skb) 728 return NULL; 729 } 730 731 /* prepend radiotap information */ 732 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); 733 734 skb_reset_mac_header(skb); 735 skb->ip_summed = CHECKSUM_UNNECESSARY; 736 skb->pkt_type = PACKET_OTHERHOST; 737 skb->protocol = htons(ETH_P_802_2); 738 739 return skb; 740 } 741 742 /* 743 * This function copies a received frame to all monitor interfaces and 744 * returns a cleaned-up SKB that no longer includes the FCS nor the 745 * radiotap header the driver might have added. 746 */ 747 static struct sk_buff * 748 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 749 struct ieee80211_rate *rate) 750 { 751 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 752 struct ieee80211_sub_if_data *sdata; 753 struct sk_buff *monskb = NULL; 754 int present_fcs_len = 0; 755 unsigned int rtap_space = 0; 756 struct ieee80211_sub_if_data *monitor_sdata = 757 rcu_dereference(local->monitor_sdata); 758 bool only_monitor = false; 759 unsigned int min_head_len; 760 761 if (status->flag & RX_FLAG_RADIOTAP_HE) 762 rtap_space += sizeof(struct ieee80211_radiotap_he); 763 764 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 765 rtap_space += sizeof(struct ieee80211_radiotap_he_mu); 766 767 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 768 rtap_space += sizeof(struct ieee80211_radiotap_lsig); 769 770 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { 771 struct ieee80211_vendor_radiotap *rtap = 772 (void *)(origskb->data + rtap_space); 773 774 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad; 775 } 776 777 min_head_len = rtap_space; 778 779 /* 780 * First, we may need to make a copy of the skb because 781 * (1) we need to modify it for radiotap (if not present), and 782 * (2) the other RX handlers will modify the skb we got. 783 * 784 * We don't need to, of course, if we aren't going to return 785 * the SKB because it has a bad FCS/PLCP checksum. 
786 */ 787 788 if (!(status->flag & RX_FLAG_NO_PSDU)) { 789 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { 790 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { 791 /* driver bug */ 792 WARN_ON(1); 793 dev_kfree_skb(origskb); 794 return NULL; 795 } 796 present_fcs_len = FCS_LEN; 797 } 798 799 /* also consider the hdr->frame_control */ 800 min_head_len += 2; 801 } 802 803 /* ensure that the expected data elements are in skb head */ 804 if (!pskb_may_pull(origskb, min_head_len)) { 805 dev_kfree_skb(origskb); 806 return NULL; 807 } 808 809 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); 810 811 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { 812 if (only_monitor) { 813 dev_kfree_skb(origskb); 814 return NULL; 815 } 816 817 return ieee80211_clean_skb(origskb, present_fcs_len, 818 rtap_space); 819 } 820 821 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); 822 823 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { 824 bool last_monitor = list_is_last(&sdata->u.mntr.list, 825 &local->mon_list); 826 827 if (!monskb) 828 monskb = ieee80211_make_monitor_skb(local, &origskb, 829 rate, rtap_space, 830 only_monitor && 831 last_monitor); 832 833 if (monskb) { 834 struct sk_buff *skb; 835 836 if (last_monitor) { 837 skb = monskb; 838 monskb = NULL; 839 } else { 840 skb = skb_clone(monskb, GFP_ATOMIC); 841 } 842 843 if (skb) { 844 skb->dev = sdata->dev; 845 ieee80211_rx_stats(skb->dev, skb->len); 846 netif_receive_skb(skb); 847 } 848 } 849 850 if (last_monitor) 851 break; 852 } 853 854 /* this happens if last_monitor was erroneously false */ 855 dev_kfree_skb(monskb); 856 857 /* ditto */ 858 if (!origskb) 859 return NULL; 860 861 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space); 862 } 863 864 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 865 { 866 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 867 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 868 int tid, seqno_idx, security_idx; 869 870 /* does the frame have a qos control field? */ 871 if (ieee80211_is_data_qos(hdr->frame_control)) { 872 u8 *qc = ieee80211_get_qos_ctl(hdr); 873 /* frame has qos control */ 874 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 875 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 876 status->rx_flags |= IEEE80211_RX_AMSDU; 877 878 seqno_idx = tid; 879 security_idx = tid; 880 } else { 881 /* 882 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 883 * 884 * Sequence numbers for management frames, QoS data 885 * frames with a broadcast/multicast address in the 886 * Address 1 field, and all non-QoS data frames sent 887 * by QoS STAs are assigned using an additional single 888 * modulo-4096 counter, [...] 889 * 890 * We also use that counter for non-QoS STAs. 891 */ 892 seqno_idx = IEEE80211_NUM_TIDS; 893 security_idx = 0; 894 if (ieee80211_is_mgmt(hdr->frame_control)) 895 security_idx = IEEE80211_NUM_TIDS; 896 tid = 0; 897 } 898 899 rx->seqno_idx = seqno_idx; 900 rx->security_idx = security_idx; 901 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 902 * For now, set skb->priority to 0 for other cases. */ 903 rx->skb->priority = (tid > 7) ? 0 : tid; 904 } 905 906 /** 907 * DOC: Packet alignment 908 * 909 * Drivers always need to pass packets that are aligned to two-byte boundaries 910 * to the stack. 
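 * A driver can sanity-check this the same way
 * ieee80211_verify_alignment() below does (a sketch):
 *
 *	WARN_ON_ONCE((unsigned long)skb->data & 1);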
911 * 912 * Additionally, should, if possible, align the payload data in a way that 913 * guarantees that the contained IP header is aligned to a four-byte 914 * boundary. In the case of regular frames, this simply means aligning the 915 * payload to a four-byte boundary (because either the IP header is directly 916 * contained, or IV/RFC1042 headers that have a length divisible by four are 917 * in front of it). If the payload data is not properly aligned and the 918 * architecture doesn't support efficient unaligned operations, mac80211 919 * will align the data. 920 * 921 * With A-MSDU frames, however, the payload data address must yield two modulo 922 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 923 * push the IP header further back to a multiple of four again. Thankfully, the 924 * specs were sane enough this time around to require padding each A-MSDU 925 * subframe to a length that is a multiple of four. 926 * 927 * Padding like Atheros hardware adds which is between the 802.11 header and 928 * the payload is not supported, the driver is required to move the 802.11 929 * header to be directly in front of the payload in that case. 930 */ 931 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 932 { 933 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 934 WARN_ON_ONCE((unsigned long)rx->skb->data & 1); 935 #endif 936 } 937 938 939 /* rx handlers */ 940 941 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) 942 { 943 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 944 945 if (is_multicast_ether_addr(hdr->addr1)) 946 return 0; 947 948 return ieee80211_is_robust_mgmt_frame(skb); 949 } 950 951 952 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) 953 { 954 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 955 956 if (!is_multicast_ether_addr(hdr->addr1)) 957 return 0; 958 959 return ieee80211_is_robust_mgmt_frame(skb); 960 } 961 962 963 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ 964 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) 965 { 966 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; 967 struct ieee80211_mmie *mmie; 968 struct ieee80211_mmie_16 *mmie16; 969 970 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) 971 return -1; 972 973 if (!ieee80211_is_robust_mgmt_frame(skb) && 974 !ieee80211_is_beacon(hdr->frame_control)) 975 return -1; /* not a robust management frame */ 976 977 mmie = (struct ieee80211_mmie *) 978 (skb->data + skb->len - sizeof(*mmie)); 979 if (mmie->element_id == WLAN_EID_MMIE && 980 mmie->length == sizeof(*mmie) - 2) 981 return le16_to_cpu(mmie->key_id); 982 983 mmie16 = (struct ieee80211_mmie_16 *) 984 (skb->data + skb->len - sizeof(*mmie16)); 985 if (skb->len >= 24 + sizeof(*mmie16) && 986 mmie16->element_id == WLAN_EID_MMIE && 987 mmie16->length == sizeof(*mmie16) - 2) 988 return le16_to_cpu(mmie16->key_id); 989 990 return -1; 991 } 992 993 static int ieee80211_get_keyid(struct sk_buff *skb, 994 const struct ieee80211_cipher_scheme *cs) 995 { 996 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 997 __le16 fc; 998 int hdrlen; 999 int minlen; 1000 u8 key_idx_off; 1001 u8 key_idx_shift; 1002 u8 keyid; 1003 1004 fc = hdr->frame_control; 1005 hdrlen = ieee80211_hdrlen(fc); 1006 1007 if (cs) { 1008 minlen = hdrlen + cs->hdr_len; 1009 key_idx_off = hdrlen + cs->key_idx_off; 1010 key_idx_shift = cs->key_idx_shift; 1011 } else { 1012 /* WEP, TKIP, CCMP and GCMP */ 1013 
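		/* For WEP/TKIP/CCMP/GCMP the Key ID is carried in bits 6-7
		 * of the fourth octet of the IV/PN header, i.e. at offset
		 * hdrlen + 3, which is why key_idx_shift is 6 here.
		 */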
minlen = hdrlen + IEEE80211_WEP_IV_LEN; 1014 key_idx_off = hdrlen + 3; 1015 key_idx_shift = 6; 1016 } 1017 1018 if (unlikely(skb->len < minlen)) 1019 return -EINVAL; 1020 1021 skb_copy_bits(skb, key_idx_off, &keyid, 1); 1022 1023 if (cs) 1024 keyid &= cs->key_idx_mask; 1025 keyid >>= key_idx_shift; 1026 1027 /* cs could use more than the usual two bits for the keyid */ 1028 if (unlikely(keyid >= NUM_DEFAULT_KEYS)) 1029 return -EINVAL; 1030 1031 return keyid; 1032 } 1033 1034 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 1035 { 1036 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1037 char *dev_addr = rx->sdata->vif.addr; 1038 1039 if (ieee80211_is_data(hdr->frame_control)) { 1040 if (is_multicast_ether_addr(hdr->addr1)) { 1041 if (ieee80211_has_tods(hdr->frame_control) || 1042 !ieee80211_has_fromds(hdr->frame_control)) 1043 return RX_DROP_MONITOR; 1044 if (ether_addr_equal(hdr->addr3, dev_addr)) 1045 return RX_DROP_MONITOR; 1046 } else { 1047 if (!ieee80211_has_a4(hdr->frame_control)) 1048 return RX_DROP_MONITOR; 1049 if (ether_addr_equal(hdr->addr4, dev_addr)) 1050 return RX_DROP_MONITOR; 1051 } 1052 } 1053 1054 /* If there is not an established peer link and this is not a peer link 1055 * establisment frame, beacon or probe, drop the frame. 1056 */ 1057 1058 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { 1059 struct ieee80211_mgmt *mgmt; 1060 1061 if (!ieee80211_is_mgmt(hdr->frame_control)) 1062 return RX_DROP_MONITOR; 1063 1064 if (ieee80211_is_action(hdr->frame_control)) { 1065 u8 category; 1066 1067 /* make sure category field is present */ 1068 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 1069 return RX_DROP_MONITOR; 1070 1071 mgmt = (struct ieee80211_mgmt *)hdr; 1072 category = mgmt->u.action.category; 1073 if (category != WLAN_CATEGORY_MESH_ACTION && 1074 category != WLAN_CATEGORY_SELF_PROTECTED) 1075 return RX_DROP_MONITOR; 1076 return RX_CONTINUE; 1077 } 1078 1079 if (ieee80211_is_probe_req(hdr->frame_control) || 1080 ieee80211_is_probe_resp(hdr->frame_control) || 1081 ieee80211_is_beacon(hdr->frame_control) || 1082 ieee80211_is_auth(hdr->frame_control)) 1083 return RX_CONTINUE; 1084 1085 return RX_DROP_MONITOR; 1086 } 1087 1088 return RX_CONTINUE; 1089 } 1090 1091 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, 1092 int index) 1093 { 1094 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index]; 1095 struct sk_buff *tail = skb_peek_tail(frames); 1096 struct ieee80211_rx_status *status; 1097 1098 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) 1099 return true; 1100 1101 if (!tail) 1102 return false; 1103 1104 status = IEEE80211_SKB_RXCB(tail); 1105 if (status->flag & RX_FLAG_AMSDU_MORE) 1106 return false; 1107 1108 return true; 1109 } 1110 1111 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 1112 struct tid_ampdu_rx *tid_agg_rx, 1113 int index, 1114 struct sk_buff_head *frames) 1115 { 1116 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; 1117 struct sk_buff *skb; 1118 struct ieee80211_rx_status *status; 1119 1120 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1121 1122 if (skb_queue_empty(skb_list)) 1123 goto no_frame; 1124 1125 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1126 __skb_queue_purge(skb_list); 1127 goto no_frame; 1128 } 1129 1130 /* release frames from the reorder ring buffer */ 1131 tid_agg_rx->stored_mpdu_num--; 1132 while ((skb = __skb_dequeue(skb_list))) { 1133 status = 
IEEE80211_SKB_RXCB(skb); 1134 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 1135 __skb_queue_tail(frames, skb); 1136 } 1137 1138 no_frame: 1139 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 1140 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1141 } 1142 1143 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 1144 struct tid_ampdu_rx *tid_agg_rx, 1145 u16 head_seq_num, 1146 struct sk_buff_head *frames) 1147 { 1148 int index; 1149 1150 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1151 1152 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { 1153 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1154 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1155 frames); 1156 } 1157 } 1158 1159 /* 1160 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 1161 * the skb was added to the buffer longer than this time ago, the earlier 1162 * frames that have not yet been received are assumed to be lost and the skb 1163 * can be released for processing. This may also release other skb's from the 1164 * reorder buffer if there are no additional gaps between the frames. 1165 * 1166 * Callers must hold tid_agg_rx->reorder_lock. 1167 */ 1168 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 1169 1170 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 1171 struct tid_ampdu_rx *tid_agg_rx, 1172 struct sk_buff_head *frames) 1173 { 1174 int index, i, j; 1175 1176 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1177 1178 /* release the buffer until next missing frame */ 1179 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1180 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && 1181 tid_agg_rx->stored_mpdu_num) { 1182 /* 1183 * No buffers ready to be released, but check whether any 1184 * frames in the reorder buffer have timed out. 1185 */ 1186 int skipped = 1; 1187 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 1188 j = (j + 1) % tid_agg_rx->buf_size) { 1189 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { 1190 skipped++; 1191 continue; 1192 } 1193 if (skipped && 1194 !time_after(jiffies, tid_agg_rx->reorder_time[j] + 1195 HT_RX_REORDER_BUF_TIMEOUT)) 1196 goto set_release_timer; 1197 1198 /* don't leave incomplete A-MSDUs around */ 1199 for (i = (index + 1) % tid_agg_rx->buf_size; i != j; 1200 i = (i + 1) % tid_agg_rx->buf_size) 1201 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); 1202 1203 ht_dbg_ratelimited(sdata, 1204 "release an RX reorder frame due to timeout on earlier frames\n"); 1205 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, 1206 frames); 1207 1208 /* 1209 * Increment the head seq# also for the skipped slots. 
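			 *
			 * For example: with head_seq_num at 100 and the slots
			 * for 100 and 101 empty, once the frame stored for 102
			 * times out it is released above and head_seq_num ends
			 * up at 103 (the release itself advances it by one,
			 * the assignment below adds the two skipped slots).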
1210 */ 1211 tid_agg_rx->head_seq_num = 1212 (tid_agg_rx->head_seq_num + 1213 skipped) & IEEE80211_SN_MASK; 1214 skipped = 0; 1215 } 1216 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1217 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1218 frames); 1219 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1220 } 1221 1222 if (tid_agg_rx->stored_mpdu_num) { 1223 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1224 1225 for (; j != (index - 1) % tid_agg_rx->buf_size; 1226 j = (j + 1) % tid_agg_rx->buf_size) { 1227 if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) 1228 break; 1229 } 1230 1231 set_release_timer: 1232 1233 if (!tid_agg_rx->removed) 1234 mod_timer(&tid_agg_rx->reorder_timer, 1235 tid_agg_rx->reorder_time[j] + 1 + 1236 HT_RX_REORDER_BUF_TIMEOUT); 1237 } else { 1238 del_timer(&tid_agg_rx->reorder_timer); 1239 } 1240 } 1241 1242 /* 1243 * As this function belongs to the RX path it must be under 1244 * rcu_read_lock protection. It returns false if the frame 1245 * can be processed immediately, true if it was consumed. 1246 */ 1247 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, 1248 struct tid_ampdu_rx *tid_agg_rx, 1249 struct sk_buff *skb, 1250 struct sk_buff_head *frames) 1251 { 1252 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1253 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1254 u16 sc = le16_to_cpu(hdr->seq_ctrl); 1255 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 1256 u16 head_seq_num, buf_size; 1257 int index; 1258 bool ret = true; 1259 1260 spin_lock(&tid_agg_rx->reorder_lock); 1261 1262 /* 1263 * Offloaded BA sessions have no known starting sequence number so pick 1264 * one from first Rxed frame for this tid after BA was started. 1265 */ 1266 if (unlikely(tid_agg_rx->auto_seq)) { 1267 tid_agg_rx->auto_seq = false; 1268 tid_agg_rx->ssn = mpdu_seq_num; 1269 tid_agg_rx->head_seq_num = mpdu_seq_num; 1270 } 1271 1272 buf_size = tid_agg_rx->buf_size; 1273 head_seq_num = tid_agg_rx->head_seq_num; 1274 1275 /* 1276 * If the current MPDU's SN is smaller than the SSN, it shouldn't 1277 * be reordered. 1278 */ 1279 if (unlikely(!tid_agg_rx->started)) { 1280 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 1281 ret = false; 1282 goto out; 1283 } 1284 tid_agg_rx->started = true; 1285 } 1286 1287 /* frame with out of date sequence number */ 1288 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 1289 dev_kfree_skb(skb); 1290 goto out; 1291 } 1292 1293 /* 1294 * If frame the sequence number exceeds our buffering window 1295 * size release some previous frames to make room for this one. 1296 */ 1297 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) { 1298 head_seq_num = ieee80211_sn_inc( 1299 ieee80211_sn_sub(mpdu_seq_num, buf_size)); 1300 /* release stored frames up to new head to stack */ 1301 ieee80211_release_reorder_frames(sdata, tid_agg_rx, 1302 head_seq_num, frames); 1303 } 1304 1305 /* Now the new frame is always in the range of the reordering buffer */ 1306 1307 index = mpdu_seq_num % tid_agg_rx->buf_size; 1308 1309 /* check if we already stored this frame */ 1310 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1311 dev_kfree_skb(skb); 1312 goto out; 1313 } 1314 1315 /* 1316 * If the current MPDU is in the right order and nothing else 1317 * is stored we can process it directly, no need to buffer it. 1318 * If it is first but there's something stored, we may be able 1319 * to release frames after this one. 
1320 */ 1321 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 1322 tid_agg_rx->stored_mpdu_num == 0) { 1323 if (!(status->flag & RX_FLAG_AMSDU_MORE)) 1324 tid_agg_rx->head_seq_num = 1325 ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1326 ret = false; 1327 goto out; 1328 } 1329 1330 /* put the frame in the reordering buffer */ 1331 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb); 1332 if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1333 tid_agg_rx->reorder_time[index] = jiffies; 1334 tid_agg_rx->stored_mpdu_num++; 1335 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames); 1336 } 1337 1338 out: 1339 spin_unlock(&tid_agg_rx->reorder_lock); 1340 return ret; 1341 } 1342 1343 /* 1344 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns 1345 * true if the MPDU was buffered, false if it should be processed. 1346 */ 1347 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 1348 struct sk_buff_head *frames) 1349 { 1350 struct sk_buff *skb = rx->skb; 1351 struct ieee80211_local *local = rx->local; 1352 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1353 struct sta_info *sta = rx->sta; 1354 struct tid_ampdu_rx *tid_agg_rx; 1355 u16 sc; 1356 u8 tid, ack_policy; 1357 1358 if (!ieee80211_is_data_qos(hdr->frame_control) || 1359 is_multicast_ether_addr(hdr->addr1)) 1360 goto dont_reorder; 1361 1362 /* 1363 * filter the QoS data rx stream according to 1364 * STA/TID and check if this STA/TID is on aggregation 1365 */ 1366 1367 if (!sta) 1368 goto dont_reorder; 1369 1370 ack_policy = *ieee80211_get_qos_ctl(hdr) & 1371 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 1372 tid = ieee80211_get_tid(hdr); 1373 1374 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 1375 if (!tid_agg_rx) { 1376 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1377 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 1378 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 1379 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 1380 WLAN_BACK_RECIPIENT, 1381 WLAN_REASON_QSTA_REQUIRE_SETUP); 1382 goto dont_reorder; 1383 } 1384 1385 /* qos null data frames are excluded */ 1386 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 1387 goto dont_reorder; 1388 1389 /* not part of a BA session */ 1390 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1391 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) 1392 goto dont_reorder; 1393 1394 /* new, potentially un-ordered, ampdu frame - process it */ 1395 1396 /* reset session timer */ 1397 if (tid_agg_rx->timeout) 1398 tid_agg_rx->last_rx = jiffies; 1399 1400 /* if this mpdu is fragmented - terminate rx aggregation session */ 1401 sc = le16_to_cpu(hdr->seq_ctrl); 1402 if (sc & IEEE80211_SCTL_FRAG) { 1403 skb_queue_tail(&rx->sdata->skb_queue, skb); 1404 ieee80211_queue_work(&local->hw, &rx->sdata->work); 1405 return; 1406 } 1407 1408 /* 1409 * No locking needed -- we will only ever process one 1410 * RX packet at a time, and thus own tid_agg_rx. All 1411 * other code manipulating it needs to (and does) make 1412 * sure that we cannot get to it any more before doing 1413 * anything with it. 
1414 */ 1415 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, 1416 frames)) 1417 return; 1418 1419 dont_reorder: 1420 __skb_queue_tail(frames, skb); 1421 } 1422 1423 static ieee80211_rx_result debug_noinline 1424 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) 1425 { 1426 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1427 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1428 1429 if (status->flag & RX_FLAG_DUP_VALIDATED) 1430 return RX_CONTINUE; 1431 1432 /* 1433 * Drop duplicate 802.11 retransmissions 1434 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") 1435 */ 1436 1437 if (rx->skb->len < 24) 1438 return RX_CONTINUE; 1439 1440 if (ieee80211_is_ctl(hdr->frame_control) || 1441 ieee80211_is_any_nullfunc(hdr->frame_control) || 1442 is_multicast_ether_addr(hdr->addr1)) 1443 return RX_CONTINUE; 1444 1445 if (!rx->sta) 1446 return RX_CONTINUE; 1447 1448 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 1449 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { 1450 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); 1451 rx->sta->rx_stats.num_duplicates++; 1452 return RX_DROP_UNUSABLE; 1453 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1454 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 1455 } 1456 1457 return RX_CONTINUE; 1458 } 1459 1460 static ieee80211_rx_result debug_noinline 1461 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 1462 { 1463 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1464 1465 /* Drop disallowed frame classes based on STA auth/assoc state; 1466 * IEEE 802.11, Chap 5.5. 1467 * 1468 * mac80211 filters only based on association state, i.e. it drops 1469 * Class 3 frames from not associated stations. hostapd sends 1470 * deauth/disassoc frames when needed. In addition, hostapd is 1471 * responsible for filtering on both auth and assoc states. 
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		struct ieee80211_txq *txq = sta->sta.txq[tid];
		struct txq_info *txqi = to_txq_info(txq);

		spin_lock(&local->active_txq_lock[txq->ac]);
		if (!list_empty(&txqi->schedule_order))
			list_del_init(&txqi->schedule_order);
		spin_unlock(&local->active_txq_lock[txq->ac]);

		if (txq_has_queue(txq))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
1592 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", 1593 sta->sta.addr, sta->sta.aid); 1594 1595 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1596 /* 1597 * Clear the flag only if the other one is still set 1598 * so that the TX path won't start TX'ing new frames 1599 * directly ... In the case that the driver flag isn't 1600 * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1601 */ 1602 clear_sta_flag(sta, WLAN_STA_PS_STA); 1603 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1604 sta->sta.addr, sta->sta.aid); 1605 return; 1606 } 1607 1608 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1609 clear_sta_flag(sta, WLAN_STA_PS_STA); 1610 ieee80211_sta_ps_deliver_wakeup(sta); 1611 } 1612 1613 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) 1614 { 1615 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1616 bool in_ps; 1617 1618 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); 1619 1620 /* Don't let the same PS state be set twice */ 1621 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); 1622 if ((start && in_ps) || (!start && !in_ps)) 1623 return -EINVAL; 1624 1625 if (start) 1626 sta_ps_start(sta); 1627 else 1628 sta_ps_end(sta); 1629 1630 return 0; 1631 } 1632 EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1633 1634 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) 1635 { 1636 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1637 1638 if (test_sta_flag(sta, WLAN_STA_SP)) 1639 return; 1640 1641 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1642 ieee80211_sta_ps_deliver_poll_response(sta); 1643 else 1644 set_sta_flag(sta, WLAN_STA_PSPOLL); 1645 } 1646 EXPORT_SYMBOL(ieee80211_sta_pspoll); 1647 1648 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) 1649 { 1650 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1651 int ac = ieee80211_ac_from_tid(tid); 1652 1653 /* 1654 * If this AC is not trigger-enabled do nothing unless the 1655 * driver is calling us after it already checked. 1656 * 1657 * NB: This could/should check a separate bitmap of trigger- 1658 * enabled queues, but for now we only implement uAPSD w/o 1659 * TSPEC changes to the ACs, so they're always the same. 1660 */ 1661 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) && 1662 tid != IEEE80211_NUM_TIDS) 1663 return; 1664 1665 /* if we are in a service period, do nothing */ 1666 if (test_sta_flag(sta, WLAN_STA_SP)) 1667 return; 1668 1669 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1670 ieee80211_sta_ps_deliver_uapsd(sta); 1671 else 1672 set_sta_flag(sta, WLAN_STA_UAPSD); 1673 } 1674 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); 1675 1676 static ieee80211_rx_result debug_noinline 1677 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) 1678 { 1679 struct ieee80211_sub_if_data *sdata = rx->sdata; 1680 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 1681 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1682 1683 if (!rx->sta) 1684 return RX_CONTINUE; 1685 1686 if (sdata->vif.type != NL80211_IFTYPE_AP && 1687 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 1688 return RX_CONTINUE; 1689 1690 /* 1691 * The device handles station powersave, so don't do anything about 1692 * uAPSD and PS-Poll frames (the latter shouldn't even come up from 1693 * it to mac80211 since they're handled.) 1694 */ 1695 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS)) 1696 return RX_CONTINUE; 1697 1698 /* 1699 * Don't do anything if the station isn't already asleep. 
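	 * (Drivers that advertise AP_LINK_PS track doze/awake state
	 * themselves and report transitions along the lines of
	 *
	 *	ieee80211_sta_ps_transition(pubsta, true);
	 *
	 * -- a sketch, see the exported helpers above -- so for them we
	 * already returned RX_CONTINUE earlier and never reach this code.)
	 *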
	 * In the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
		   is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control))
			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
	}

	sta->rx_stats.fragments++;

	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
	sta->rx_stats.bytes += rx->skb->len;
	u64_stats_update_end(&rx->sta->rx_stats.syncp);

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
					-signal);
		}
	}

	if (ieee80211_is_s1g_beacon(hdr->frame_control))
		return RX_CONTINUE;

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence, and only for a data or management
	 * frame as specified in IEEE 802.11-2016 11.2.3.2
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1) &&
	    (ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_data(hdr->frame_control)) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet, send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
1851 */ 1852 sta->rx_stats.packets++; 1853 dev_kfree_skb(rx->skb); 1854 return RX_QUEUED; 1855 } 1856 1857 return RX_CONTINUE; 1858 } /* ieee80211_rx_h_sta_process */ 1859 1860 static struct ieee80211_key * 1861 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) 1862 { 1863 struct ieee80211_key *key = NULL; 1864 struct ieee80211_sub_if_data *sdata = rx->sdata; 1865 int idx2; 1866 1867 /* Make sure key gets set if either BIGTK key index is set so that 1868 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected 1869 * Beacon frames and Beacon frames that claim to use another BIGTK key 1870 * index (i.e., a key that we do not have). 1871 */ 1872 1873 if (idx < 0) { 1874 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; 1875 idx2 = idx + 1; 1876 } else { 1877 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1878 idx2 = idx + 1; 1879 else 1880 idx2 = idx - 1; 1881 } 1882 1883 if (rx->sta) 1884 key = rcu_dereference(rx->sta->gtk[idx]); 1885 if (!key) 1886 key = rcu_dereference(sdata->keys[idx]); 1887 if (!key && rx->sta) 1888 key = rcu_dereference(rx->sta->gtk[idx2]); 1889 if (!key) 1890 key = rcu_dereference(sdata->keys[idx2]); 1891 1892 return key; 1893 } 1894 1895 static ieee80211_rx_result debug_noinline 1896 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 1897 { 1898 struct sk_buff *skb = rx->skb; 1899 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1900 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1901 int keyidx; 1902 ieee80211_rx_result result = RX_DROP_UNUSABLE; 1903 struct ieee80211_key *sta_ptk = NULL; 1904 struct ieee80211_key *ptk_idx = NULL; 1905 int mmie_keyidx = -1; 1906 __le16 fc; 1907 const struct ieee80211_cipher_scheme *cs = NULL; 1908 1909 if (ieee80211_is_ext(hdr->frame_control)) 1910 return RX_CONTINUE; 1911 1912 /* 1913 * Key selection 101 1914 * 1915 * There are five types of keys: 1916 * - GTK (group keys) 1917 * - IGTK (group keys for management frames) 1918 * - BIGTK (group keys for Beacon frames) 1919 * - PTK (pairwise keys) 1920 * - STK (station-to-station pairwise keys) 1921 * 1922 * When selecting a key, we have to distinguish between multicast 1923 * (including broadcast) and unicast frames, the latter can only 1924 * use PTKs and STKs while the former always use GTKs, IGTKs, and 1925 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, 1926 * then unicast frames can also use key indices like GTKs. Hence, if we 1927 * don't have a PTK/STK we check the key index for a WEP key. 1928 * 1929 * Note that in a regular BSS, multicast frames are sent by the 1930 * AP only, associated stations unicast the frame to the AP first 1931 * which then multicasts it on their behalf. 1932 * 1933 * There is also a slight problem in IBSS mode: GTKs are negotiated 1934 * with each station, that is something we don't currently handle. 1935 * The spec seems to expect that one negotiates the same key with 1936 * every station but there's no such requirement; VLANs could be 1937 * possible. 
1938 */ 1939 1940 /* start without a key */ 1941 rx->key = NULL; 1942 fc = hdr->frame_control; 1943 1944 if (rx->sta) { 1945 int keyid = rx->sta->ptk_idx; 1946 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 1947 1948 if (ieee80211_has_protected(fc)) { 1949 cs = rx->sta->cipher_scheme; 1950 keyid = ieee80211_get_keyid(rx->skb, cs); 1951 1952 if (unlikely(keyid < 0)) 1953 return RX_DROP_UNUSABLE; 1954 1955 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); 1956 } 1957 } 1958 1959 if (!ieee80211_has_protected(fc)) 1960 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 1961 1962 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 1963 rx->key = ptk_idx ? ptk_idx : sta_ptk; 1964 if ((status->flag & RX_FLAG_DECRYPTED) && 1965 (status->flag & RX_FLAG_IV_STRIPPED)) 1966 return RX_CONTINUE; 1967 /* Skip decryption if the frame is not protected. */ 1968 if (!ieee80211_has_protected(fc)) 1969 return RX_CONTINUE; 1970 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { 1971 /* Broadcast/multicast robust management frame / BIP */ 1972 if ((status->flag & RX_FLAG_DECRYPTED) && 1973 (status->flag & RX_FLAG_IV_STRIPPED)) 1974 return RX_CONTINUE; 1975 1976 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || 1977 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + 1978 NUM_DEFAULT_BEACON_KEYS) { 1979 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1980 skb->data, 1981 skb->len); 1982 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1983 } 1984 1985 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); 1986 if (!rx->key) 1987 return RX_CONTINUE; /* Beacon protection not in use */ 1988 } else if (mmie_keyidx >= 0) { 1989 /* Broadcast/multicast robust management frame / BIP */ 1990 if ((status->flag & RX_FLAG_DECRYPTED) && 1991 (status->flag & RX_FLAG_IV_STRIPPED)) 1992 return RX_CONTINUE; 1993 1994 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1995 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1996 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1997 if (rx->sta) { 1998 if (ieee80211_is_group_privacy_action(skb) && 1999 test_sta_flag(rx->sta, WLAN_STA_MFP)) 2000 return RX_DROP_MONITOR; 2001 2002 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 2003 } 2004 if (!rx->key) 2005 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 2006 } else if (!ieee80211_has_protected(fc)) { 2007 /* 2008 * The frame was not protected, so skip decryption. However, we 2009 * need to set rx->key if there is a key that could have been 2010 * used so that the frame may be dropped if encryption would 2011 * have been expected. 2012 */ 2013 struct ieee80211_key *key = NULL; 2014 struct ieee80211_sub_if_data *sdata = rx->sdata; 2015 int i; 2016 2017 if (ieee80211_is_beacon(fc)) { 2018 key = ieee80211_rx_get_bigtk(rx, -1); 2019 } else if (ieee80211_is_mgmt(fc) && 2020 is_multicast_ether_addr(hdr->addr1)) { 2021 key = rcu_dereference(rx->sdata->default_mgmt_key); 2022 } else { 2023 if (rx->sta) { 2024 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2025 key = rcu_dereference(rx->sta->gtk[i]); 2026 if (key) 2027 break; 2028 } 2029 } 2030 if (!key) { 2031 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2032 key = rcu_dereference(sdata->keys[i]); 2033 if (key) 2034 break; 2035 } 2036 } 2037 } 2038 if (key) 2039 rx->key = key; 2040 return RX_CONTINUE; 2041 } else { 2042 /* 2043 * The device doesn't give us the IV so we won't be 2044 * able to look up the key. That's ok though, we 2045 * don't need to decrypt the frame, we just won't 2046 * be able to keep statistics accurate. 
2047 * Except for key threshold notifications, should 2048 * we somehow allow the driver to tell us which key 2049 * the hardware used if this flag is set? 2050 */ 2051 if ((status->flag & RX_FLAG_DECRYPTED) && 2052 (status->flag & RX_FLAG_IV_STRIPPED)) 2053 return RX_CONTINUE; 2054 2055 keyidx = ieee80211_get_keyid(rx->skb, cs); 2056 2057 if (unlikely(keyidx < 0)) 2058 return RX_DROP_UNUSABLE; 2059 2060 /* check per-station GTK first, if multicast packet */ 2061 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 2062 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 2063 2064 /* if not found, try default key */ 2065 if (!rx->key) { 2066 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2067 2068 /* 2069 * RSNA-protected unicast frames should always be 2070 * sent with pairwise or station-to-station keys, 2071 * but for WEP we allow using a key index as well. 2072 */ 2073 if (rx->key && 2074 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2075 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2076 !is_multicast_ether_addr(hdr->addr1)) 2077 rx->key = NULL; 2078 } 2079 } 2080 2081 if (rx->key) { 2082 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2083 return RX_DROP_MONITOR; 2084 2085 /* TODO: add threshold stuff again */ 2086 } else { 2087 return RX_DROP_MONITOR; 2088 } 2089 2090 switch (rx->key->conf.cipher) { 2091 case WLAN_CIPHER_SUITE_WEP40: 2092 case WLAN_CIPHER_SUITE_WEP104: 2093 result = ieee80211_crypto_wep_decrypt(rx); 2094 break; 2095 case WLAN_CIPHER_SUITE_TKIP: 2096 result = ieee80211_crypto_tkip_decrypt(rx); 2097 break; 2098 case WLAN_CIPHER_SUITE_CCMP: 2099 result = ieee80211_crypto_ccmp_decrypt( 2100 rx, IEEE80211_CCMP_MIC_LEN); 2101 break; 2102 case WLAN_CIPHER_SUITE_CCMP_256: 2103 result = ieee80211_crypto_ccmp_decrypt( 2104 rx, IEEE80211_CCMP_256_MIC_LEN); 2105 break; 2106 case WLAN_CIPHER_SUITE_AES_CMAC: 2107 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2108 break; 2109 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2110 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2111 break; 2112 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2113 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2114 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2115 break; 2116 case WLAN_CIPHER_SUITE_GCMP: 2117 case WLAN_CIPHER_SUITE_GCMP_256: 2118 result = ieee80211_crypto_gcmp_decrypt(rx); 2119 break; 2120 default: 2121 result = ieee80211_crypto_hw_decrypt(rx); 2122 } 2123 2124 /* the hdr variable is invalid after the decrypt handlers */ 2125 2126 /* either the frame has been decrypted or will be dropped */ 2127 status->flag |= RX_FLAG_DECRYPTED; 2128 2129 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE)) 2130 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2131 skb->data, skb->len); 2132 2133 return result; 2134 } 2135 2136 static inline struct ieee80211_fragment_entry * 2137 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 2138 unsigned int frag, unsigned int seq, int rx_queue, 2139 struct sk_buff **skb) 2140 { 2141 struct ieee80211_fragment_entry *entry; 2142 2143 entry = &sdata->fragments[sdata->fragment_next++]; 2144 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 2145 sdata->fragment_next = 0; 2146 2147 if (!skb_queue_empty(&entry->skb_list)) 2148 __skb_queue_purge(&entry->skb_list); 2149 2150 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2151 *skb = NULL; 2152 entry->first_frag_time = jiffies; 2153 entry->seq = seq; 2154 entry->rx_queue = rx_queue; 2155 entry->last_frag = frag; 2156 entry->check_sequential_pn = false; 2157 entry->extra_len = 0; 2158 
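	/*
	 * At this point the new entry holds only the first fragment; later
	 * fragments are matched to it by ieee80211_reassemble_find() and
	 * appended in ieee80211_rx_h_defragment() below, which also checks
	 * that CCMP/GCMP PNs increment by one when check_sequential_pn is set.
	 */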
2159 return entry; 2160 } 2161 2162 static inline struct ieee80211_fragment_entry * 2163 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 2164 unsigned int frag, unsigned int seq, 2165 int rx_queue, struct ieee80211_hdr *hdr) 2166 { 2167 struct ieee80211_fragment_entry *entry; 2168 int i, idx; 2169 2170 idx = sdata->fragment_next; 2171 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2172 struct ieee80211_hdr *f_hdr; 2173 struct sk_buff *f_skb; 2174 2175 idx--; 2176 if (idx < 0) 2177 idx = IEEE80211_FRAGMENT_MAX - 1; 2178 2179 entry = &sdata->fragments[idx]; 2180 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2181 entry->rx_queue != rx_queue || 2182 entry->last_frag + 1 != frag) 2183 continue; 2184 2185 f_skb = __skb_peek(&entry->skb_list); 2186 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2187 2188 /* 2189 * Check ftype and addresses are equal, else check next fragment 2190 */ 2191 if (((hdr->frame_control ^ f_hdr->frame_control) & 2192 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2193 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2194 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2195 continue; 2196 2197 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2198 __skb_queue_purge(&entry->skb_list); 2199 continue; 2200 } 2201 return entry; 2202 } 2203 2204 return NULL; 2205 } 2206 2207 static ieee80211_rx_result debug_noinline 2208 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2209 { 2210 struct ieee80211_hdr *hdr; 2211 u16 sc; 2212 __le16 fc; 2213 unsigned int frag, seq; 2214 struct ieee80211_fragment_entry *entry; 2215 struct sk_buff *skb; 2216 2217 hdr = (struct ieee80211_hdr *)rx->skb->data; 2218 fc = hdr->frame_control; 2219 2220 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) 2221 return RX_CONTINUE; 2222 2223 sc = le16_to_cpu(hdr->seq_ctrl); 2224 frag = sc & IEEE80211_SCTL_FRAG; 2225 2226 if (is_multicast_ether_addr(hdr->addr1)) { 2227 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); 2228 goto out_no_led; 2229 } 2230 2231 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2232 goto out; 2233 2234 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2235 2236 if (skb_linearize(rx->skb)) 2237 return RX_DROP_UNUSABLE; 2238 2239 /* 2240 * skb_linearize() might change the skb->data and 2241 * previously cached variables (in this case, hdr) need to 2242 * be refreshed with the new data. 2243 */ 2244 hdr = (struct ieee80211_hdr *)rx->skb->data; 2245 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2246 2247 if (frag == 0) { 2248 /* This is the first fragment of a new frame. */ 2249 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 2250 rx->seqno_idx, &(rx->skb)); 2251 if (rx->key && 2252 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2253 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2254 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2255 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2256 ieee80211_has_protected(fc)) { 2257 int queue = rx->security_idx; 2258 2259 /* Store CCMP/GCMP PN so that we can verify that the 2260 * next fragment has a sequential PN value. 
2261 */ 2262 entry->check_sequential_pn = true; 2263 memcpy(entry->last_pn, 2264 rx->key->u.ccmp.rx_pn[queue], 2265 IEEE80211_CCMP_PN_LEN); 2266 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2267 u.ccmp.rx_pn) != 2268 offsetof(struct ieee80211_key, 2269 u.gcmp.rx_pn)); 2270 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2271 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2272 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2273 IEEE80211_GCMP_PN_LEN); 2274 } 2275 return RX_QUEUED; 2276 } 2277 2278 /* This is a fragment for a frame that should already be pending in 2279 * fragment cache. Add this fragment to the end of the pending entry. 2280 */ 2281 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 2282 rx->seqno_idx, hdr); 2283 if (!entry) { 2284 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2285 return RX_DROP_MONITOR; 2286 } 2287 2288 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2289 * MPDU PN values are not incrementing in steps of 1." 2290 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2291 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2292 */ 2293 if (entry->check_sequential_pn) { 2294 int i; 2295 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2296 int queue; 2297 2298 if (!rx->key || 2299 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 2300 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && 2301 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && 2302 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) 2303 return RX_DROP_UNUSABLE; 2304 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2305 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2306 pn[i]++; 2307 if (pn[i]) 2308 break; 2309 } 2310 queue = rx->security_idx; 2311 rpn = rx->key->u.ccmp.rx_pn[queue]; 2312 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2313 return RX_DROP_UNUSABLE; 2314 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2315 } 2316 2317 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2318 __skb_queue_tail(&entry->skb_list, rx->skb); 2319 entry->last_frag = frag; 2320 entry->extra_len += rx->skb->len; 2321 if (ieee80211_has_morefrags(fc)) { 2322 rx->skb = NULL; 2323 return RX_QUEUED; 2324 } 2325 2326 rx->skb = __skb_dequeue(&entry->skb_list); 2327 if (skb_tailroom(rx->skb) < entry->extra_len) { 2328 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2329 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2330 GFP_ATOMIC))) { 2331 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2332 __skb_queue_purge(&entry->skb_list); 2333 return RX_DROP_UNUSABLE; 2334 } 2335 } 2336 while ((skb = __skb_dequeue(&entry->skb_list))) { 2337 skb_put_data(rx->skb, skb->data, skb->len); 2338 dev_kfree_skb(skb); 2339 } 2340 2341 out: 2342 ieee80211_led_rx(rx->local); 2343 out_no_led: 2344 if (rx->sta) 2345 rx->sta->rx_stats.packets++; 2346 return RX_CONTINUE; 2347 } 2348 2349 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2350 { 2351 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2352 return -EACCES; 2353 2354 return 0; 2355 } 2356 2357 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2358 { 2359 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 2360 struct sk_buff *skb = rx->skb; 2361 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2362 2363 /* 2364 * Pass through unencrypted frames if the hardware has 2365 * decrypted them already. 
2366 */
2367 if (status->flag & RX_FLAG_DECRYPTED)
2368 return 0;
2369
2370 /* check mesh EAPOL frames first */
2371 if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
2372 ieee80211_is_data(fc))) {
2373 struct ieee80211s_hdr *mesh_hdr;
2374 u16 hdr_len = ieee80211_hdrlen(fc);
2375 u16 ethertype_offset;
2376 __be16 ethertype;
2377
2378 if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
2379 goto drop_check;
2380
2381 /* make sure fixed part of mesh header is there, also checks skb len */
2382 if (!pskb_may_pull(rx->skb, hdr_len + 6))
2383 goto drop_check;
2384
2385 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
2386 ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
2387 sizeof(rfc1042_header);
2388
2389 if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
2390 ethertype == rx->sdata->control_port_protocol)
2391 return 0;
2392 }
2393
2394 drop_check:
2395 /* Drop unencrypted frames if key is set. */
2396 if (unlikely(!ieee80211_has_protected(fc) &&
2397 !ieee80211_is_any_nullfunc(fc) &&
2398 ieee80211_is_data(fc) && rx->key))
2399 return -EACCES;
2400
2401 return 0;
2402 }
2403
2404 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2405 {
2406 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2407 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2408 __le16 fc = hdr->frame_control;
2409
2410 /*
2411 * Pass through unencrypted frames if the hardware has
2412 * decrypted them already.
2413 */
2414 if (status->flag & RX_FLAG_DECRYPTED)
2415 return 0;
2416
2417 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2418 if (unlikely(!ieee80211_has_protected(fc) &&
2419 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
2420 rx->key)) {
2421 if (ieee80211_is_deauth(fc) ||
2422 ieee80211_is_disassoc(fc))
2423 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2424 rx->skb->data,
2425 rx->skb->len);
2426 return -EACCES;
2427 }
2428 /* BIP does not use Protected field, so need to check MMIE */
2429 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2430 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2431 if (ieee80211_is_deauth(fc) ||
2432 ieee80211_is_disassoc(fc))
2433 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2434 rx->skb->data,
2435 rx->skb->len);
2436 return -EACCES;
2437 }
2438 if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2439 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2440 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2441 rx->skb->data,
2442 rx->skb->len);
2443 return -EACCES;
2444 }
2445 /*
2446 * When using MFP, Action frames are not allowed prior to
2447 * having configured keys.
2448 */ 2449 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2450 ieee80211_is_robust_mgmt_frame(rx->skb))) 2451 return -EACCES; 2452 } 2453 2454 return 0; 2455 } 2456 2457 static int 2458 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2459 { 2460 struct ieee80211_sub_if_data *sdata = rx->sdata; 2461 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2462 bool check_port_control = false; 2463 struct ethhdr *ehdr; 2464 int ret; 2465 2466 *port_control = false; 2467 if (ieee80211_has_a4(hdr->frame_control) && 2468 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2469 return -1; 2470 2471 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2472 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2473 2474 if (!sdata->u.mgd.use_4addr) 2475 return -1; 2476 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2477 check_port_control = true; 2478 } 2479 2480 if (is_multicast_ether_addr(hdr->addr1) && 2481 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2482 return -1; 2483 2484 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2485 if (ret < 0) 2486 return ret; 2487 2488 ehdr = (struct ethhdr *) rx->skb->data; 2489 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2490 *port_control = true; 2491 else if (check_port_control) 2492 return -1; 2493 2494 return 0; 2495 } 2496 2497 /* 2498 * requires that rx->skb is a frame with ethernet header 2499 */ 2500 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2501 { 2502 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2503 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2504 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2505 2506 /* 2507 * Allow EAPOL frames to us/the PAE group address regardless 2508 * of whether the frame was encrypted or not. 
2509 */ 2510 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2511 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2512 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2513 return true; 2514 2515 if (ieee80211_802_1x_port_control(rx) || 2516 ieee80211_drop_unencrypted(rx, fc)) 2517 return false; 2518 2519 return true; 2520 } 2521 2522 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2523 struct ieee80211_rx_data *rx) 2524 { 2525 struct ieee80211_sub_if_data *sdata = rx->sdata; 2526 struct net_device *dev = sdata->dev; 2527 2528 if (unlikely((skb->protocol == sdata->control_port_protocol || 2529 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && 2530 !sdata->control_port_no_preauth)) && 2531 sdata->control_port_over_nl80211)) { 2532 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2533 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2534 2535 cfg80211_rx_control_port(dev, skb, noencrypt); 2536 dev_kfree_skb(skb); 2537 } else { 2538 memset(skb->cb, 0, sizeof(skb->cb)); 2539 2540 /* deliver to local stack */ 2541 if (rx->list) 2542 list_add_tail(&skb->list, rx->list); 2543 else 2544 netif_receive_skb(skb); 2545 } 2546 } 2547 2548 /* 2549 * requires that rx->skb is a frame with ethernet header 2550 */ 2551 static void 2552 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2553 { 2554 struct ieee80211_sub_if_data *sdata = rx->sdata; 2555 struct net_device *dev = sdata->dev; 2556 struct sk_buff *skb, *xmit_skb; 2557 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2558 struct sta_info *dsta; 2559 2560 skb = rx->skb; 2561 xmit_skb = NULL; 2562 2563 ieee80211_rx_stats(dev, skb->len); 2564 2565 if (rx->sta) { 2566 /* The seqno index has the same property as needed 2567 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2568 * for non-QoS-data frames. Here we know it's a data 2569 * frame, so count MSDUs. 2570 */ 2571 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 2572 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2573 u64_stats_update_end(&rx->sta->rx_stats.syncp); 2574 } 2575 2576 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2577 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2578 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2579 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2580 if (is_multicast_ether_addr(ehdr->h_dest) && 2581 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2582 /* 2583 * send multicast frames both to higher layers in 2584 * local net stack and back to the wireless medium 2585 */ 2586 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2587 if (!xmit_skb) 2588 net_info_ratelimited("%s: failed to clone multicast frame\n", 2589 dev->name); 2590 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2591 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2592 dsta = sta_info_get(sdata, ehdr->h_dest); 2593 if (dsta) { 2594 /* 2595 * The destination station is associated to 2596 * this AP (in this VLAN), so send the frame 2597 * directly to it and do not pass it to local 2598 * net stack. 2599 */ 2600 xmit_skb = skb; 2601 skb = NULL; 2602 } 2603 } 2604 } 2605 2606 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2607 if (skb) { 2608 /* 'align' will only take the values 0 or 2 here since all 2609 * frames are required to be aligned to 2-byte boundaries 2610 * when being passed to mac80211; the code here works just 2611 * as well if that isn't true, but mac80211 assumes it can 2612 * access fields as 2-byte aligned (e.g. 
for ether_addr_equal)
2613 */
2614 int align;
2615
2616 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2617 if (align) {
2618 if (WARN_ON(skb_headroom(skb) < 3)) {
2619 dev_kfree_skb(skb);
2620 skb = NULL;
2621 } else {
2622 u8 *data = skb->data;
2623 size_t len = skb_headlen(skb);
2624 skb->data -= align;
2625 memmove(skb->data, data, len);
2626 skb_set_tail_pointer(skb, len);
2627 }
2628 }
2629 }
2630 #endif
2631
2632 if (skb) {
2633 skb->protocol = eth_type_trans(skb, dev);
2634 ieee80211_deliver_skb_to_local_stack(skb, rx);
2635 }
2636
2637 if (xmit_skb) {
2638 /*
2639 * Send to wireless media and increase priority by 256 to
2640 * keep the received priority instead of reclassifying
2641 * the frame (see cfg80211_classify8021d).
2642 */
2643 xmit_skb->priority += 256;
2644 xmit_skb->protocol = htons(ETH_P_802_3);
2645 skb_reset_network_header(xmit_skb);
2646 skb_reset_mac_header(xmit_skb);
2647 dev_queue_xmit(xmit_skb);
2648 }
2649 }
2650
2651 static ieee80211_rx_result debug_noinline
2652 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
2653 {
2654 struct net_device *dev = rx->sdata->dev;
2655 struct sk_buff *skb = rx->skb;
2656 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2657 __le16 fc = hdr->frame_control;
2658 struct sk_buff_head frame_list;
2659 struct ethhdr ethhdr;
2660 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
2661
2662 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2663 check_da = NULL;
2664 check_sa = NULL;
2665 } else switch (rx->sdata->vif.type) {
2666 case NL80211_IFTYPE_AP:
2667 case NL80211_IFTYPE_AP_VLAN:
2668 check_da = NULL;
2669 break;
2670 case NL80211_IFTYPE_STATION:
2671 if (!rx->sta ||
2672 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
2673 check_sa = NULL;
2674 break;
2675 case NL80211_IFTYPE_MESH_POINT:
2676 check_sa = NULL;
2677 break;
2678 default:
2679 break;
2680 }
2681
2682 skb->dev = dev;
2683 __skb_queue_head_init(&frame_list);
2684
2685 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
2686 rx->sdata->vif.addr,
2687 rx->sdata->vif.type,
2688 data_offset))
2689 return RX_DROP_UNUSABLE;
2690
2691 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
2692 rx->sdata->vif.type,
2693 rx->local->hw.extra_tx_headroom,
2694 check_da, check_sa);
2695
2696 while (!skb_queue_empty(&frame_list)) {
2697 rx->skb = __skb_dequeue(&frame_list);
2698
2699 if (!ieee80211_frame_allowed(rx, fc)) {
2700 dev_kfree_skb(rx->skb);
2701 continue;
2702 }
2703
2704 ieee80211_deliver_skb(rx);
2705 }
2706
2707 return RX_QUEUED;
2708 }
2709
2710 static ieee80211_rx_result debug_noinline
2711 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
2712 {
2713 struct sk_buff *skb = rx->skb;
2714 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2715 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2716 __le16 fc = hdr->frame_control;
2717
2718 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
2719 return RX_CONTINUE;
2720
2721 if (unlikely(!ieee80211_is_data(fc)))
2722 return RX_CONTINUE;
2723
2724 if (unlikely(!ieee80211_is_data_present(fc)))
2725 return RX_DROP_MONITOR;
2726
2727 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
2728 switch (rx->sdata->vif.type) {
2729 case NL80211_IFTYPE_AP_VLAN:
2730 if (!rx->sdata->u.vlan.sta)
2731 return RX_DROP_UNUSABLE;
2732 break;
2733 case NL80211_IFTYPE_STATION:
2734 if (!rx->sdata->u.mgd.use_4addr)
2735 return RX_DROP_UNUSABLE;
2736 break;
2737 default:
2738 return RX_DROP_UNUSABLE;
2739 }
2740 }
2741
2742 if
(is_multicast_ether_addr(hdr->addr1)) 2743 return RX_DROP_UNUSABLE; 2744 2745 return __ieee80211_rx_h_amsdu(rx, 0); 2746 } 2747 2748 #ifdef CONFIG_MAC80211_MESH 2749 static ieee80211_rx_result 2750 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2751 { 2752 struct ieee80211_hdr *fwd_hdr, *hdr; 2753 struct ieee80211_tx_info *info; 2754 struct ieee80211s_hdr *mesh_hdr; 2755 struct sk_buff *skb = rx->skb, *fwd_skb; 2756 struct ieee80211_local *local = rx->local; 2757 struct ieee80211_sub_if_data *sdata = rx->sdata; 2758 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2759 u16 ac, q, hdrlen; 2760 int tailroom = 0; 2761 2762 hdr = (struct ieee80211_hdr *) skb->data; 2763 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2764 2765 /* make sure fixed part of mesh header is there, also checks skb len */ 2766 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2767 return RX_DROP_MONITOR; 2768 2769 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2770 2771 /* make sure full mesh header is there, also checks skb len */ 2772 if (!pskb_may_pull(rx->skb, 2773 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2774 return RX_DROP_MONITOR; 2775 2776 /* reload pointers */ 2777 hdr = (struct ieee80211_hdr *) skb->data; 2778 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2779 2780 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2781 return RX_DROP_MONITOR; 2782 2783 /* frame is in RMC, don't forward */ 2784 if (ieee80211_is_data(hdr->frame_control) && 2785 is_multicast_ether_addr(hdr->addr1) && 2786 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2787 return RX_DROP_MONITOR; 2788 2789 if (!ieee80211_is_data(hdr->frame_control)) 2790 return RX_CONTINUE; 2791 2792 if (!mesh_hdr->ttl) 2793 return RX_DROP_MONITOR; 2794 2795 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2796 struct mesh_path *mppath; 2797 char *proxied_addr; 2798 char *mpp_addr; 2799 2800 if (is_multicast_ether_addr(hdr->addr1)) { 2801 mpp_addr = hdr->addr3; 2802 proxied_addr = mesh_hdr->eaddr1; 2803 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == 2804 MESH_FLAGS_AE_A5_A6) { 2805 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2806 mpp_addr = hdr->addr4; 2807 proxied_addr = mesh_hdr->eaddr2; 2808 } else { 2809 return RX_DROP_MONITOR; 2810 } 2811 2812 rcu_read_lock(); 2813 mppath = mpp_path_lookup(sdata, proxied_addr); 2814 if (!mppath) { 2815 mpp_path_add(sdata, proxied_addr, mpp_addr); 2816 } else { 2817 spin_lock_bh(&mppath->state_lock); 2818 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2819 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2820 mppath->exp_time = jiffies; 2821 spin_unlock_bh(&mppath->state_lock); 2822 } 2823 rcu_read_unlock(); 2824 } 2825 2826 /* Frame has reached destination. 
Don't forward */ 2827 if (!is_multicast_ether_addr(hdr->addr1) && 2828 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2829 return RX_CONTINUE; 2830 2831 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2832 q = sdata->vif.hw_queue[ac]; 2833 if (ieee80211_queue_stopped(&local->hw, q)) { 2834 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2835 return RX_DROP_MONITOR; 2836 } 2837 skb_set_queue_mapping(skb, q); 2838 2839 if (!--mesh_hdr->ttl) { 2840 if (!is_multicast_ether_addr(hdr->addr1)) 2841 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, 2842 dropped_frames_ttl); 2843 goto out; 2844 } 2845 2846 if (!ifmsh->mshcfg.dot11MeshForwarding) 2847 goto out; 2848 2849 if (sdata->crypto_tx_tailroom_needed_cnt) 2850 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2851 2852 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2853 sdata->encrypt_headroom, 2854 tailroom, GFP_ATOMIC); 2855 if (!fwd_skb) 2856 goto out; 2857 2858 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2859 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2860 info = IEEE80211_SKB_CB(fwd_skb); 2861 memset(info, 0, sizeof(*info)); 2862 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 2863 info->control.vif = &rx->sdata->vif; 2864 info->control.jiffies = jiffies; 2865 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2866 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2867 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2868 /* update power mode indication when forwarding */ 2869 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2870 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2871 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2872 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2873 } else { 2874 /* unable to resolve next hop */ 2875 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2876 fwd_hdr->addr3, 0, 2877 WLAN_REASON_MESH_PATH_NOFORWARD, 2878 fwd_hdr->addr2); 2879 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2880 kfree_skb(fwd_skb); 2881 return RX_DROP_MONITOR; 2882 } 2883 2884 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2885 ieee80211_add_pending_skb(local, fwd_skb); 2886 out: 2887 if (is_multicast_ether_addr(hdr->addr1)) 2888 return RX_CONTINUE; 2889 return RX_DROP_MONITOR; 2890 } 2891 #endif 2892 2893 static ieee80211_rx_result debug_noinline 2894 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2895 { 2896 struct ieee80211_sub_if_data *sdata = rx->sdata; 2897 struct ieee80211_local *local = rx->local; 2898 struct net_device *dev = sdata->dev; 2899 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2900 __le16 fc = hdr->frame_control; 2901 bool port_control; 2902 int err; 2903 2904 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2905 return RX_CONTINUE; 2906 2907 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2908 return RX_DROP_MONITOR; 2909 2910 /* 2911 * Send unexpected-4addr-frame event to hostapd. For older versions, 2912 * also drop the frame to cooked monitor interfaces. 
2913 */ 2914 if (ieee80211_has_a4(hdr->frame_control) && 2915 sdata->vif.type == NL80211_IFTYPE_AP) { 2916 if (rx->sta && 2917 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2918 cfg80211_rx_unexpected_4addr_frame( 2919 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2920 return RX_DROP_MONITOR; 2921 } 2922 2923 err = __ieee80211_data_to_8023(rx, &port_control); 2924 if (unlikely(err)) 2925 return RX_DROP_UNUSABLE; 2926 2927 if (!ieee80211_frame_allowed(rx, fc)) 2928 return RX_DROP_MONITOR; 2929 2930 /* directly handle TDLS channel switch requests/responses */ 2931 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2932 cpu_to_be16(ETH_P_TDLS))) { 2933 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2934 2935 if (pskb_may_pull(rx->skb, 2936 offsetof(struct ieee80211_tdls_data, u)) && 2937 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2938 tf->category == WLAN_CATEGORY_TDLS && 2939 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2940 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2941 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 2942 schedule_work(&local->tdls_chsw_work); 2943 if (rx->sta) 2944 rx->sta->rx_stats.packets++; 2945 2946 return RX_QUEUED; 2947 } 2948 } 2949 2950 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2951 unlikely(port_control) && sdata->bss) { 2952 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2953 u.ap); 2954 dev = sdata->dev; 2955 rx->sdata = sdata; 2956 } 2957 2958 rx->skb->dev = dev; 2959 2960 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 2961 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2962 !is_multicast_ether_addr( 2963 ((struct ethhdr *)rx->skb->data)->h_dest) && 2964 (!local->scanning && 2965 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 2966 mod_timer(&local->dynamic_ps_timer, jiffies + 2967 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2968 2969 ieee80211_deliver_skb(rx); 2970 2971 return RX_QUEUED; 2972 } 2973 2974 static ieee80211_rx_result debug_noinline 2975 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2976 { 2977 struct sk_buff *skb = rx->skb; 2978 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2979 struct tid_ampdu_rx *tid_agg_rx; 2980 u16 start_seq_num; 2981 u16 tid; 2982 2983 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2984 return RX_CONTINUE; 2985 2986 if (ieee80211_is_back_req(bar->frame_control)) { 2987 struct { 2988 __le16 control, start_seq_num; 2989 } __packed bar_data; 2990 struct ieee80211_event event = { 2991 .type = BAR_RX_EVENT, 2992 }; 2993 2994 if (!rx->sta) 2995 return RX_DROP_MONITOR; 2996 2997 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2998 &bar_data, sizeof(bar_data))) 2999 return RX_DROP_MONITOR; 3000 3001 tid = le16_to_cpu(bar_data.control) >> 12; 3002 3003 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 3004 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 3005 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 3006 WLAN_BACK_RECIPIENT, 3007 WLAN_REASON_QSTA_REQUIRE_SETUP); 3008 3009 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 3010 if (!tid_agg_rx) 3011 return RX_DROP_MONITOR; 3012 3013 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 3014 event.u.ba.tid = tid; 3015 event.u.ba.ssn = start_seq_num; 3016 event.u.ba.sta = &rx->sta->sta; 3017 3018 /* reset session timer */ 3019 if (tid_agg_rx->timeout) 3020 mod_timer(&tid_agg_rx->session_timer, 3021 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 3022 
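	/*
	 * A plain spin_lock() is sufficient for the reorder lock at this
	 * point: the RX handlers run with softirqs already disabled, since
	 * ieee80211_rx_handlers() takes rx_path_lock with spin_lock_bh().
	 */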
3023 spin_lock(&tid_agg_rx->reorder_lock); 3024 /* release stored frames up to start of BAR */ 3025 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 3026 start_seq_num, frames); 3027 spin_unlock(&tid_agg_rx->reorder_lock); 3028 3029 drv_event_callback(rx->local, rx->sdata, &event); 3030 3031 kfree_skb(skb); 3032 return RX_QUEUED; 3033 } 3034 3035 /* 3036 * After this point, we only want management frames, 3037 * so we can drop all remaining control frames to 3038 * cooked monitor interfaces. 3039 */ 3040 return RX_DROP_MONITOR; 3041 } 3042 3043 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 3044 struct ieee80211_mgmt *mgmt, 3045 size_t len) 3046 { 3047 struct ieee80211_local *local = sdata->local; 3048 struct sk_buff *skb; 3049 struct ieee80211_mgmt *resp; 3050 3051 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 3052 /* Not to own unicast address */ 3053 return; 3054 } 3055 3056 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 3057 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 3058 /* Not from the current AP or not associated yet. */ 3059 return; 3060 } 3061 3062 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 3063 /* Too short SA Query request frame */ 3064 return; 3065 } 3066 3067 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 3068 if (skb == NULL) 3069 return; 3070 3071 skb_reserve(skb, local->hw.extra_tx_headroom); 3072 resp = skb_put_zero(skb, 24); 3073 memcpy(resp->da, mgmt->sa, ETH_ALEN); 3074 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 3075 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 3076 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3077 IEEE80211_STYPE_ACTION); 3078 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 3079 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 3080 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 3081 memcpy(resp->u.action.u.sa_query.trans_id, 3082 mgmt->u.action.u.sa_query.trans_id, 3083 WLAN_SA_QUERY_TR_ID_LEN); 3084 3085 ieee80211_tx_skb(sdata, skb); 3086 } 3087 3088 static ieee80211_rx_result debug_noinline 3089 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3090 { 3091 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3092 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3093 3094 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) 3095 return RX_CONTINUE; 3096 3097 /* 3098 * From here on, look only at management frames. 3099 * Data and control frames are already handled, 3100 * and unknown (reserved) frames are useless. 
3101 */ 3102 if (rx->skb->len < 24) 3103 return RX_DROP_MONITOR; 3104 3105 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3106 return RX_DROP_MONITOR; 3107 3108 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3109 ieee80211_is_beacon(mgmt->frame_control) && 3110 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3111 int sig = 0; 3112 3113 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3114 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3115 sig = status->signal; 3116 3117 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy, 3118 rx->skb->data, rx->skb->len, 3119 ieee80211_rx_status_to_khz(status), 3120 sig); 3121 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3122 } 3123 3124 if (ieee80211_drop_unencrypted_mgmt(rx)) 3125 return RX_DROP_UNUSABLE; 3126 3127 return RX_CONTINUE; 3128 } 3129 3130 static ieee80211_rx_result debug_noinline 3131 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3132 { 3133 struct ieee80211_local *local = rx->local; 3134 struct ieee80211_sub_if_data *sdata = rx->sdata; 3135 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3136 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3137 int len = rx->skb->len; 3138 3139 if (!ieee80211_is_action(mgmt->frame_control)) 3140 return RX_CONTINUE; 3141 3142 /* drop too small frames */ 3143 if (len < IEEE80211_MIN_ACTION_SIZE) 3144 return RX_DROP_UNUSABLE; 3145 3146 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3147 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3148 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3149 return RX_DROP_UNUSABLE; 3150 3151 switch (mgmt->u.action.category) { 3152 case WLAN_CATEGORY_HT: 3153 /* reject HT action frames from stations not supporting HT */ 3154 if (!rx->sta->sta.ht_cap.ht_supported) 3155 goto invalid; 3156 3157 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3158 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3159 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3160 sdata->vif.type != NL80211_IFTYPE_AP && 3161 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3162 break; 3163 3164 /* verify action & smps_control/chanwidth are present */ 3165 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3166 goto invalid; 3167 3168 switch (mgmt->u.action.u.ht_smps.action) { 3169 case WLAN_HT_ACTION_SMPS: { 3170 struct ieee80211_supported_band *sband; 3171 enum ieee80211_smps_mode smps_mode; 3172 struct sta_opmode_info sta_opmode = {}; 3173 3174 if (sdata->vif.type != NL80211_IFTYPE_AP && 3175 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 3176 goto handled; 3177 3178 /* convert to HT capability */ 3179 switch (mgmt->u.action.u.ht_smps.smps_control) { 3180 case WLAN_HT_SMPS_CONTROL_DISABLED: 3181 smps_mode = IEEE80211_SMPS_OFF; 3182 break; 3183 case WLAN_HT_SMPS_CONTROL_STATIC: 3184 smps_mode = IEEE80211_SMPS_STATIC; 3185 break; 3186 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3187 smps_mode = IEEE80211_SMPS_DYNAMIC; 3188 break; 3189 default: 3190 goto invalid; 3191 } 3192 3193 /* if no change do nothing */ 3194 if (rx->sta->sta.smps_mode == smps_mode) 3195 goto handled; 3196 rx->sta->sta.smps_mode = smps_mode; 3197 sta_opmode.smps_mode = 3198 ieee80211_smps_mode_to_smps_mode(smps_mode); 3199 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3200 3201 sband = rx->local->hw.wiphy->bands[status->band]; 3202 3203 rate_control_rate_update(local, sband, rx->sta, 3204 IEEE80211_RC_SMPS_CHANGED); 3205 cfg80211_sta_opmode_change_notify(sdata->dev, 3206 rx->sta->addr, 3207 &sta_opmode, 3208 GFP_ATOMIC); 3209 goto handled; 3210 } 3211 case 
WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3212 struct ieee80211_supported_band *sband; 3213 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3214 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 3215 struct sta_opmode_info sta_opmode = {}; 3216 3217 /* If it doesn't support 40 MHz it can't change ... */ 3218 if (!(rx->sta->sta.ht_cap.cap & 3219 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3220 goto handled; 3221 3222 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 3223 max_bw = IEEE80211_STA_RX_BW_20; 3224 else 3225 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 3226 3227 /* set cur_max_bandwidth and recalc sta bw */ 3228 rx->sta->cur_max_bandwidth = max_bw; 3229 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 3230 3231 if (rx->sta->sta.bandwidth == new_bw) 3232 goto handled; 3233 3234 rx->sta->sta.bandwidth = new_bw; 3235 sband = rx->local->hw.wiphy->bands[status->band]; 3236 sta_opmode.bw = 3237 ieee80211_sta_rx_bw_to_chan_width(rx->sta); 3238 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; 3239 3240 rate_control_rate_update(local, sband, rx->sta, 3241 IEEE80211_RC_BW_CHANGED); 3242 cfg80211_sta_opmode_change_notify(sdata->dev, 3243 rx->sta->addr, 3244 &sta_opmode, 3245 GFP_ATOMIC); 3246 goto handled; 3247 } 3248 default: 3249 goto invalid; 3250 } 3251 3252 break; 3253 case WLAN_CATEGORY_PUBLIC: 3254 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3255 goto invalid; 3256 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3257 break; 3258 if (!rx->sta) 3259 break; 3260 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 3261 break; 3262 if (mgmt->u.action.u.ext_chan_switch.action_code != 3263 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3264 break; 3265 if (len < offsetof(struct ieee80211_mgmt, 3266 u.action.u.ext_chan_switch.variable)) 3267 goto invalid; 3268 goto queue; 3269 case WLAN_CATEGORY_VHT: 3270 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3271 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3272 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3273 sdata->vif.type != NL80211_IFTYPE_AP && 3274 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3275 break; 3276 3277 /* verify action code is present */ 3278 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3279 goto invalid; 3280 3281 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3282 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3283 /* verify opmode is present */ 3284 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3285 goto invalid; 3286 goto queue; 3287 } 3288 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3289 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3290 goto invalid; 3291 goto queue; 3292 } 3293 default: 3294 break; 3295 } 3296 break; 3297 case WLAN_CATEGORY_BACK: 3298 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3299 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3300 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3301 sdata->vif.type != NL80211_IFTYPE_AP && 3302 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3303 break; 3304 3305 /* verify action_code is present */ 3306 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3307 break; 3308 3309 switch (mgmt->u.action.u.addba_req.action_code) { 3310 case WLAN_ACTION_ADDBA_REQ: 3311 if (len < (IEEE80211_MIN_ACTION_SIZE + 3312 sizeof(mgmt->u.action.u.addba_req))) 3313 goto invalid; 3314 break; 3315 case WLAN_ACTION_ADDBA_RESP: 3316 if (len < (IEEE80211_MIN_ACTION_SIZE + 3317 sizeof(mgmt->u.action.u.addba_resp))) 3318 goto invalid; 3319 break; 3320 case WLAN_ACTION_DELBA: 3321 if (len < (IEEE80211_MIN_ACTION_SIZE + 3322 sizeof(mgmt->u.action.u.delba))) 3323 goto invalid; 3324 break; 3325 default: 3326 goto invalid; 3327 } 3328 3329 goto queue; 3330 case 
WLAN_CATEGORY_SPECTRUM_MGMT: 3331 /* verify action_code is present */ 3332 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3333 break; 3334 3335 switch (mgmt->u.action.u.measurement.action_code) { 3336 case WLAN_ACTION_SPCT_MSR_REQ: 3337 if (status->band != NL80211_BAND_5GHZ) 3338 break; 3339 3340 if (len < (IEEE80211_MIN_ACTION_SIZE + 3341 sizeof(mgmt->u.action.u.measurement))) 3342 break; 3343 3344 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3345 break; 3346 3347 ieee80211_process_measurement_req(sdata, mgmt, len); 3348 goto handled; 3349 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3350 u8 *bssid; 3351 if (len < (IEEE80211_MIN_ACTION_SIZE + 3352 sizeof(mgmt->u.action.u.chan_switch))) 3353 break; 3354 3355 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3356 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3357 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3358 break; 3359 3360 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3361 bssid = sdata->u.mgd.bssid; 3362 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3363 bssid = sdata->u.ibss.bssid; 3364 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3365 bssid = mgmt->sa; 3366 else 3367 break; 3368 3369 if (!ether_addr_equal(mgmt->bssid, bssid)) 3370 break; 3371 3372 goto queue; 3373 } 3374 } 3375 break; 3376 case WLAN_CATEGORY_SELF_PROTECTED: 3377 if (len < (IEEE80211_MIN_ACTION_SIZE + 3378 sizeof(mgmt->u.action.u.self_prot.action_code))) 3379 break; 3380 3381 switch (mgmt->u.action.u.self_prot.action_code) { 3382 case WLAN_SP_MESH_PEERING_OPEN: 3383 case WLAN_SP_MESH_PEERING_CLOSE: 3384 case WLAN_SP_MESH_PEERING_CONFIRM: 3385 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3386 goto invalid; 3387 if (sdata->u.mesh.user_mpm) 3388 /* userspace handles this frame */ 3389 break; 3390 goto queue; 3391 case WLAN_SP_MGK_INFORM: 3392 case WLAN_SP_MGK_ACK: 3393 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3394 goto invalid; 3395 break; 3396 } 3397 break; 3398 case WLAN_CATEGORY_MESH_ACTION: 3399 if (len < (IEEE80211_MIN_ACTION_SIZE + 3400 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3401 break; 3402 3403 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3404 break; 3405 if (mesh_action_is_path_sel(mgmt) && 3406 !mesh_path_sel_is_hwmp(sdata)) 3407 break; 3408 goto queue; 3409 } 3410 3411 return RX_CONTINUE; 3412 3413 invalid: 3414 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3415 /* will return in the next handlers */ 3416 return RX_CONTINUE; 3417 3418 handled: 3419 if (rx->sta) 3420 rx->sta->rx_stats.packets++; 3421 dev_kfree_skb(rx->skb); 3422 return RX_QUEUED; 3423 3424 queue: 3425 skb_queue_tail(&sdata->skb_queue, rx->skb); 3426 ieee80211_queue_work(&local->hw, &sdata->work); 3427 if (rx->sta) 3428 rx->sta->rx_stats.packets++; 3429 return RX_QUEUED; 3430 } 3431 3432 static ieee80211_rx_result debug_noinline 3433 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3434 { 3435 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3436 int sig = 0; 3437 3438 /* skip known-bad action frames and return them in the next handler */ 3439 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3440 return RX_CONTINUE; 3441 3442 /* 3443 * Getting here means the kernel doesn't know how to handle 3444 * it, but maybe userspace does ... include returned frames 3445 * so userspace can register for those to know whether ones 3446 * it transmitted were processed or returned. 
3447 */ 3448 3449 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3450 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3451 sig = status->signal; 3452 3453 if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev, 3454 ieee80211_rx_status_to_khz(status), sig, 3455 rx->skb->data, rx->skb->len, 0)) { 3456 if (rx->sta) 3457 rx->sta->rx_stats.packets++; 3458 dev_kfree_skb(rx->skb); 3459 return RX_QUEUED; 3460 } 3461 3462 return RX_CONTINUE; 3463 } 3464 3465 static ieee80211_rx_result debug_noinline 3466 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) 3467 { 3468 struct ieee80211_sub_if_data *sdata = rx->sdata; 3469 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3470 int len = rx->skb->len; 3471 3472 if (!ieee80211_is_action(mgmt->frame_control)) 3473 return RX_CONTINUE; 3474 3475 switch (mgmt->u.action.category) { 3476 case WLAN_CATEGORY_SA_QUERY: 3477 if (len < (IEEE80211_MIN_ACTION_SIZE + 3478 sizeof(mgmt->u.action.u.sa_query))) 3479 break; 3480 3481 switch (mgmt->u.action.u.sa_query.action) { 3482 case WLAN_ACTION_SA_QUERY_REQUEST: 3483 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3484 break; 3485 ieee80211_process_sa_query_req(sdata, mgmt, len); 3486 goto handled; 3487 } 3488 break; 3489 } 3490 3491 return RX_CONTINUE; 3492 3493 handled: 3494 if (rx->sta) 3495 rx->sta->rx_stats.packets++; 3496 dev_kfree_skb(rx->skb); 3497 return RX_QUEUED; 3498 } 3499 3500 static ieee80211_rx_result debug_noinline 3501 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 3502 { 3503 struct ieee80211_local *local = rx->local; 3504 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3505 struct sk_buff *nskb; 3506 struct ieee80211_sub_if_data *sdata = rx->sdata; 3507 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3508 3509 if (!ieee80211_is_action(mgmt->frame_control)) 3510 return RX_CONTINUE; 3511 3512 /* 3513 * For AP mode, hostapd is responsible for handling any action 3514 * frames that we didn't handle, including returning unknown 3515 * ones. For all other modes we will return them to the sender, 3516 * setting the 0x80 bit in the action category, as required by 3517 * 802.11-2012 9.24.4. 3518 * Newer versions of hostapd shall also use the management frame 3519 * registration mechanisms, but older ones still use cooked 3520 * monitor interfaces so push all frames there. 
3521 */ 3522 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 3523 (sdata->vif.type == NL80211_IFTYPE_AP || 3524 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 3525 return RX_DROP_MONITOR; 3526 3527 if (is_multicast_ether_addr(mgmt->da)) 3528 return RX_DROP_MONITOR; 3529 3530 /* do not return rejected action frames */ 3531 if (mgmt->u.action.category & 0x80) 3532 return RX_DROP_UNUSABLE; 3533 3534 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3535 GFP_ATOMIC); 3536 if (nskb) { 3537 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3538 3539 nmgmt->u.action.category |= 0x80; 3540 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3541 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3542 3543 memset(nskb->cb, 0, sizeof(nskb->cb)); 3544 3545 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3546 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3547 3548 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3549 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3550 IEEE80211_TX_CTL_NO_CCK_RATE; 3551 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3552 info->hw_queue = 3553 local->hw.offchannel_tx_hw_queue; 3554 } 3555 3556 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3557 status->band); 3558 } 3559 dev_kfree_skb(rx->skb); 3560 return RX_QUEUED; 3561 } 3562 3563 static ieee80211_rx_result debug_noinline 3564 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) 3565 { 3566 struct ieee80211_sub_if_data *sdata = rx->sdata; 3567 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 3568 3569 if (!ieee80211_is_ext(hdr->frame_control)) 3570 return RX_CONTINUE; 3571 3572 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3573 return RX_DROP_MONITOR; 3574 3575 /* for now only beacons are ext, so queue them */ 3576 skb_queue_tail(&sdata->skb_queue, rx->skb); 3577 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3578 if (rx->sta) 3579 rx->sta->rx_stats.packets++; 3580 3581 return RX_QUEUED; 3582 } 3583 3584 static ieee80211_rx_result debug_noinline 3585 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3586 { 3587 struct ieee80211_sub_if_data *sdata = rx->sdata; 3588 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3589 __le16 stype; 3590 3591 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3592 3593 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3594 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3595 sdata->vif.type != NL80211_IFTYPE_OCB && 3596 sdata->vif.type != NL80211_IFTYPE_STATION) 3597 return RX_DROP_MONITOR; 3598 3599 switch (stype) { 3600 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3601 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3602 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3603 /* process for all: mesh, mlme, ibss */ 3604 break; 3605 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3606 if (is_multicast_ether_addr(mgmt->da) && 3607 !is_broadcast_ether_addr(mgmt->da)) 3608 return RX_DROP_MONITOR; 3609 3610 /* process only for station/IBSS */ 3611 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3612 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3613 return RX_DROP_MONITOR; 3614 break; 3615 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3616 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3617 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3618 if (is_multicast_ether_addr(mgmt->da) && 3619 !is_broadcast_ether_addr(mgmt->da)) 3620 return RX_DROP_MONITOR; 3621 3622 /* process only for station */ 3623 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3624 return RX_DROP_MONITOR; 3625 break; 3626 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3627 /* process only for ibss and mesh */ 3628 if 
(sdata->vif.type != NL80211_IFTYPE_ADHOC && 3629 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3630 return RX_DROP_MONITOR; 3631 break; 3632 default: 3633 return RX_DROP_MONITOR; 3634 } 3635 3636 /* queue up frame and kick off work to process it */ 3637 skb_queue_tail(&sdata->skb_queue, rx->skb); 3638 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3639 if (rx->sta) 3640 rx->sta->rx_stats.packets++; 3641 3642 return RX_QUEUED; 3643 } 3644 3645 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3646 struct ieee80211_rate *rate) 3647 { 3648 struct ieee80211_sub_if_data *sdata; 3649 struct ieee80211_local *local = rx->local; 3650 struct sk_buff *skb = rx->skb, *skb2; 3651 struct net_device *prev_dev = NULL; 3652 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3653 int needed_headroom; 3654 3655 /* 3656 * If cooked monitor has been processed already, then 3657 * don't do it again. If not, set the flag. 3658 */ 3659 if (rx->flags & IEEE80211_RX_CMNTR) 3660 goto out_free_skb; 3661 rx->flags |= IEEE80211_RX_CMNTR; 3662 3663 /* If there are no cooked monitor interfaces, just free the SKB */ 3664 if (!local->cooked_mntrs) 3665 goto out_free_skb; 3666 3667 /* vendor data is long removed here */ 3668 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3669 /* room for the radiotap header based on driver features */ 3670 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3671 3672 if (skb_headroom(skb) < needed_headroom && 3673 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3674 goto out_free_skb; 3675 3676 /* prepend radiotap information */ 3677 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3678 false); 3679 3680 skb_reset_mac_header(skb); 3681 skb->ip_summed = CHECKSUM_UNNECESSARY; 3682 skb->pkt_type = PACKET_OTHERHOST; 3683 skb->protocol = htons(ETH_P_802_2); 3684 3685 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3686 if (!ieee80211_sdata_running(sdata)) 3687 continue; 3688 3689 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3690 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) 3691 continue; 3692 3693 if (prev_dev) { 3694 skb2 = skb_clone(skb, GFP_ATOMIC); 3695 if (skb2) { 3696 skb2->dev = prev_dev; 3697 netif_receive_skb(skb2); 3698 } 3699 } 3700 3701 prev_dev = sdata->dev; 3702 ieee80211_rx_stats(sdata->dev, skb->len); 3703 } 3704 3705 if (prev_dev) { 3706 skb->dev = prev_dev; 3707 netif_receive_skb(skb); 3708 return; 3709 } 3710 3711 out_free_skb: 3712 dev_kfree_skb(skb); 3713 } 3714 3715 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3716 ieee80211_rx_result res) 3717 { 3718 switch (res) { 3719 case RX_DROP_MONITOR: 3720 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3721 if (rx->sta) 3722 rx->sta->rx_stats.dropped++; 3723 fallthrough; 3724 case RX_CONTINUE: { 3725 struct ieee80211_rate *rate = NULL; 3726 struct ieee80211_supported_band *sband; 3727 struct ieee80211_rx_status *status; 3728 3729 status = IEEE80211_SKB_RXCB((rx->skb)); 3730 3731 sband = rx->local->hw.wiphy->bands[status->band]; 3732 if (status->encoding == RX_ENC_LEGACY) 3733 rate = &sband->bitrates[status->rate_idx]; 3734 3735 ieee80211_rx_cooked_monitor(rx, rate); 3736 break; 3737 } 3738 case RX_DROP_UNUSABLE: 3739 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3740 if (rx->sta) 3741 rx->sta->rx_stats.dropped++; 3742 dev_kfree_skb(rx->skb); 3743 break; 3744 case RX_QUEUED: 3745 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3746 break; 3747 } 3748 } 3749 3750 static void 
ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3751 struct sk_buff_head *frames) 3752 { 3753 ieee80211_rx_result res = RX_DROP_MONITOR; 3754 struct sk_buff *skb; 3755 3756 #define CALL_RXH(rxh) \ 3757 do { \ 3758 res = rxh(rx); \ 3759 if (res != RX_CONTINUE) \ 3760 goto rxh_next; \ 3761 } while (0) 3762 3763 /* Lock here to avoid hitting all of the data used in the RX 3764 * path (e.g. key data, station data, ...) concurrently when 3765 * a frame is released from the reorder buffer due to timeout 3766 * from the timer, potentially concurrently with RX from the 3767 * driver. 3768 */ 3769 spin_lock_bh(&rx->local->rx_path_lock); 3770 3771 while ((skb = __skb_dequeue(frames))) { 3772 /* 3773 * all the other fields are valid across frames 3774 * that belong to an aMPDU since they are on the 3775 * same TID from the same station 3776 */ 3777 rx->skb = skb; 3778 3779 CALL_RXH(ieee80211_rx_h_check_more_data); 3780 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 3781 CALL_RXH(ieee80211_rx_h_sta_process); 3782 CALL_RXH(ieee80211_rx_h_decrypt); 3783 CALL_RXH(ieee80211_rx_h_defragment); 3784 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 3785 /* must be after MMIC verify so header is counted in MPDU mic */ 3786 #ifdef CONFIG_MAC80211_MESH 3787 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3788 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3789 #endif 3790 CALL_RXH(ieee80211_rx_h_amsdu); 3791 CALL_RXH(ieee80211_rx_h_data); 3792 3793 /* special treatment -- needs the queue */ 3794 res = ieee80211_rx_h_ctrl(rx, frames); 3795 if (res != RX_CONTINUE) 3796 goto rxh_next; 3797 3798 CALL_RXH(ieee80211_rx_h_mgmt_check); 3799 CALL_RXH(ieee80211_rx_h_action); 3800 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 3801 CALL_RXH(ieee80211_rx_h_action_post_userspace); 3802 CALL_RXH(ieee80211_rx_h_action_return); 3803 CALL_RXH(ieee80211_rx_h_ext); 3804 CALL_RXH(ieee80211_rx_h_mgmt); 3805 3806 rxh_next: 3807 ieee80211_rx_handlers_result(rx, res); 3808 3809 #undef CALL_RXH 3810 } 3811 3812 spin_unlock_bh(&rx->local->rx_path_lock); 3813 } 3814 3815 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3816 { 3817 struct sk_buff_head reorder_release; 3818 ieee80211_rx_result res = RX_DROP_MONITOR; 3819 3820 __skb_queue_head_init(&reorder_release); 3821 3822 #define CALL_RXH(rxh) \ 3823 do { \ 3824 res = rxh(rx); \ 3825 if (res != RX_CONTINUE) \ 3826 goto rxh_next; \ 3827 } while (0) 3828 3829 CALL_RXH(ieee80211_rx_h_check_dup); 3830 CALL_RXH(ieee80211_rx_h_check); 3831 3832 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3833 3834 ieee80211_rx_handlers(rx, &reorder_release); 3835 return; 3836 3837 rxh_next: 3838 ieee80211_rx_handlers_result(rx, res); 3839 3840 #undef CALL_RXH 3841 } 3842 3843 /* 3844 * This function makes calls into the RX path, therefore 3845 * it has to be invoked under RCU read lock. 
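 * It is the A-MPDU reorder-timeout path: MPDUs that have sat in the
 * reorder buffer past the timeout are released in order here and fed
 * through the regular RX handlers above.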
3846 */ 3847 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3848 { 3849 struct sk_buff_head frames; 3850 struct ieee80211_rx_data rx = { 3851 .sta = sta, 3852 .sdata = sta->sdata, 3853 .local = sta->local, 3854 /* This is OK -- must be QoS data frame */ 3855 .security_idx = tid, 3856 .seqno_idx = tid, 3857 }; 3858 struct tid_ampdu_rx *tid_agg_rx; 3859 3860 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3861 if (!tid_agg_rx) 3862 return; 3863 3864 __skb_queue_head_init(&frames); 3865 3866 spin_lock(&tid_agg_rx->reorder_lock); 3867 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3868 spin_unlock(&tid_agg_rx->reorder_lock); 3869 3870 if (!skb_queue_empty(&frames)) { 3871 struct ieee80211_event event = { 3872 .type = BA_FRAME_TIMEOUT, 3873 .u.ba.tid = tid, 3874 .u.ba.sta = &sta->sta, 3875 }; 3876 drv_event_callback(rx.local, rx.sdata, &event); 3877 } 3878 3879 ieee80211_rx_handlers(&rx, &frames); 3880 } 3881 3882 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 3883 u16 ssn, u64 filtered, 3884 u16 received_mpdus) 3885 { 3886 struct sta_info *sta; 3887 struct tid_ampdu_rx *tid_agg_rx; 3888 struct sk_buff_head frames; 3889 struct ieee80211_rx_data rx = { 3890 /* This is OK -- must be QoS data frame */ 3891 .security_idx = tid, 3892 .seqno_idx = tid, 3893 }; 3894 int i, diff; 3895 3896 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 3897 return; 3898 3899 __skb_queue_head_init(&frames); 3900 3901 sta = container_of(pubsta, struct sta_info, sta); 3902 3903 rx.sta = sta; 3904 rx.sdata = sta->sdata; 3905 rx.local = sta->local; 3906 3907 rcu_read_lock(); 3908 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3909 if (!tid_agg_rx) 3910 goto out; 3911 3912 spin_lock_bh(&tid_agg_rx->reorder_lock); 3913 3914 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 3915 int release; 3916 3917 /* release all frames in the reorder buffer */ 3918 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 3919 IEEE80211_SN_MODULO; 3920 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 3921 release, &frames); 3922 /* update ssn to match received ssn */ 3923 tid_agg_rx->head_seq_num = ssn; 3924 } else { 3925 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 3926 &frames); 3927 } 3928 3929 /* handle the case that received ssn is behind the mac ssn. 
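 * (i.e. the SSN the device reports is older than our head_seq_num);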
3930 * it can be tid_agg_rx->buf_size behind and still be valid */ 3931 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 3932 if (diff >= tid_agg_rx->buf_size) { 3933 tid_agg_rx->reorder_buf_filtered = 0; 3934 goto release; 3935 } 3936 filtered = filtered >> diff; 3937 ssn += diff; 3938 3939 /* update bitmap */ 3940 for (i = 0; i < tid_agg_rx->buf_size; i++) { 3941 int index = (ssn + i) % tid_agg_rx->buf_size; 3942 3943 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 3944 if (filtered & BIT_ULL(i)) 3945 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 3946 } 3947 3948 /* now process also frames that the filter marking released */ 3949 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3950 3951 release: 3952 spin_unlock_bh(&tid_agg_rx->reorder_lock); 3953 3954 ieee80211_rx_handlers(&rx, &frames); 3955 3956 out: 3957 rcu_read_unlock(); 3958 } 3959 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 3960 3961 /* main receive path */ 3962 3963 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 3964 { 3965 struct ieee80211_sub_if_data *sdata = rx->sdata; 3966 struct sk_buff *skb = rx->skb; 3967 struct ieee80211_hdr *hdr = (void *)skb->data; 3968 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3969 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3970 bool multicast = is_multicast_ether_addr(hdr->addr1) || 3971 ieee80211_is_s1g_beacon(hdr->frame_control); 3972 3973 switch (sdata->vif.type) { 3974 case NL80211_IFTYPE_STATION: 3975 if (!bssid && !sdata->u.mgd.use_4addr) 3976 return false; 3977 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) 3978 return false; 3979 if (multicast) 3980 return true; 3981 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3982 case NL80211_IFTYPE_ADHOC: 3983 if (!bssid) 3984 return false; 3985 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3986 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3987 return false; 3988 if (ieee80211_is_beacon(hdr->frame_control)) 3989 return true; 3990 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 3991 return false; 3992 if (!multicast && 3993 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3994 return false; 3995 if (!rx->sta) { 3996 int rate_idx; 3997 if (status->encoding != RX_ENC_LEGACY) 3998 rate_idx = 0; /* TODO: HT/VHT rates */ 3999 else 4000 rate_idx = status->rate_idx; 4001 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 4002 BIT(rate_idx)); 4003 } 4004 return true; 4005 case NL80211_IFTYPE_OCB: 4006 if (!bssid) 4007 return false; 4008 if (!ieee80211_is_data_present(hdr->frame_control)) 4009 return false; 4010 if (!is_broadcast_ether_addr(bssid)) 4011 return false; 4012 if (!multicast && 4013 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 4014 return false; 4015 if (!rx->sta) { 4016 int rate_idx; 4017 if (status->encoding != RX_ENC_LEGACY) 4018 rate_idx = 0; /* TODO: HT rates */ 4019 else 4020 rate_idx = status->rate_idx; 4021 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 4022 BIT(rate_idx)); 4023 } 4024 return true; 4025 case NL80211_IFTYPE_MESH_POINT: 4026 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 4027 return false; 4028 if (multicast) 4029 return true; 4030 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4031 case NL80211_IFTYPE_AP_VLAN: 4032 case NL80211_IFTYPE_AP: 4033 if (!bssid) 4034 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4035 4036 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 4037 /* 4038 * Accept public action frames even when the 4039 * BSSID doesn't match, this is 
used for P2P 4040 * and location updates. Note that mac80211 4041 * itself never looks at these frames. 4042 */ 4043 if (!multicast && 4044 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4045 return false; 4046 if (ieee80211_is_public_action(hdr, skb->len)) 4047 return true; 4048 return ieee80211_is_beacon(hdr->frame_control); 4049 } 4050 4051 if (!ieee80211_has_tods(hdr->frame_control)) { 4052 /* ignore data frames to TDLS-peers */ 4053 if (ieee80211_is_data(hdr->frame_control)) 4054 return false; 4055 /* ignore action frames to TDLS-peers */ 4056 if (ieee80211_is_action(hdr->frame_control) && 4057 !is_broadcast_ether_addr(bssid) && 4058 !ether_addr_equal(bssid, hdr->addr1)) 4059 return false; 4060 } 4061 4062 /* 4063 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 4064 * the BSSID - we've checked that already but may have accepted 4065 * the wildcard (ff:ff:ff:ff:ff:ff). 4066 * 4067 * It also says: 4068 * The BSSID of the Data frame is determined as follows: 4069 * a) If the STA is contained within an AP or is associated 4070 * with an AP, the BSSID is the address currently in use 4071 * by the STA contained in the AP. 4072 * 4073 * So we should not accept data frames with an address that's 4074 * multicast. 4075 * 4076 * Accepting it also opens a security problem because stations 4077 * could encrypt it with the GTK and inject traffic that way. 4078 */ 4079 if (ieee80211_is_data(hdr->frame_control) && multicast) 4080 return false; 4081 4082 return true; 4083 case NL80211_IFTYPE_WDS: 4084 if (bssid || !ieee80211_is_data(hdr->frame_control)) 4085 return false; 4086 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); 4087 case NL80211_IFTYPE_P2P_DEVICE: 4088 return ieee80211_is_public_action(hdr, skb->len) || 4089 ieee80211_is_probe_req(hdr->frame_control) || 4090 ieee80211_is_probe_resp(hdr->frame_control) || 4091 ieee80211_is_beacon(hdr->frame_control); 4092 case NL80211_IFTYPE_NAN: 4093 /* Currently no frames on NAN interface are allowed */ 4094 return false; 4095 default: 4096 break; 4097 } 4098 4099 WARN_ON_ONCE(1); 4100 return false; 4101 } 4102 4103 void ieee80211_check_fast_rx(struct sta_info *sta) 4104 { 4105 struct ieee80211_sub_if_data *sdata = sta->sdata; 4106 struct ieee80211_local *local = sdata->local; 4107 struct ieee80211_key *key; 4108 struct ieee80211_fast_rx fastrx = { 4109 .dev = sdata->dev, 4110 .vif_type = sdata->vif.type, 4111 .control_port_protocol = sdata->control_port_protocol, 4112 }, *old, *new = NULL; 4113 bool assign = false; 4114 4115 /* use sparse to check that we don't return without updating */ 4116 __acquire(check_fast_rx); 4117 4118 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 4119 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 4120 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 4121 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 4122 4123 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 4124 4125 /* fast-rx doesn't do reordering */ 4126 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 4127 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 4128 goto clear; 4129 4130 switch (sdata->vif.type) { 4131 case NL80211_IFTYPE_STATION: 4132 if (sta->sta.tdls) { 4133 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4134 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4135 fastrx.expected_ds_bits = 0; 4136 } else { 4137 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4138 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 4139 
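/* Frames from our AP have only FromDS set, with DA in addr1 and SA in
 * addr3 -- matching the offsets chosen just above.
 */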
fastrx.expected_ds_bits = 4140 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4141 } 4142 4143 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 4144 fastrx.expected_ds_bits |= 4145 cpu_to_le16(IEEE80211_FCTL_TODS); 4146 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4147 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4148 } 4149 4150 if (!sdata->u.mgd.powersave) 4151 break; 4152 4153 /* software powersave is a huge mess, avoid all of it */ 4154 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 4155 goto clear; 4156 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 4157 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 4158 goto clear; 4159 break; 4160 case NL80211_IFTYPE_AP_VLAN: 4161 case NL80211_IFTYPE_AP: 4162 /* parallel-rx requires this, at least with calls to 4163 * ieee80211_sta_ps_transition() 4164 */ 4165 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4166 goto clear; 4167 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4168 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4169 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4170 4171 fastrx.internal_forward = 4172 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4173 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4174 !sdata->u.vlan.sta); 4175 4176 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4177 sdata->u.vlan.sta) { 4178 fastrx.expected_ds_bits |= 4179 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4180 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4181 fastrx.internal_forward = 0; 4182 } 4183 4184 break; 4185 default: 4186 goto clear; 4187 } 4188 4189 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4190 goto clear; 4191 4192 rcu_read_lock(); 4193 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4194 if (key) { 4195 switch (key->conf.cipher) { 4196 case WLAN_CIPHER_SUITE_TKIP: 4197 /* we don't want to deal with MMIC in fast-rx */ 4198 goto clear_rcu; 4199 case WLAN_CIPHER_SUITE_CCMP: 4200 case WLAN_CIPHER_SUITE_CCMP_256: 4201 case WLAN_CIPHER_SUITE_GCMP: 4202 case WLAN_CIPHER_SUITE_GCMP_256: 4203 break; 4204 default: 4205 /* We also don't want to deal with 4206 * WEP or cipher scheme. 
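 * Anything other than the CCMP/GCMP ciphers accepted above therefore
 * leaves fast-rx disabled and frames for this station go through the
 * regular (slow) RX path.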
4207 */ 4208 goto clear_rcu; 4209 } 4210 4211 fastrx.key = true; 4212 fastrx.icv_len = key->conf.icv_len; 4213 } 4214 4215 assign = true; 4216 clear_rcu: 4217 rcu_read_unlock(); 4218 clear: 4219 __release(check_fast_rx); 4220 4221 if (assign) 4222 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4223 4224 spin_lock_bh(&sta->lock); 4225 old = rcu_dereference_protected(sta->fast_rx, true); 4226 rcu_assign_pointer(sta->fast_rx, new); 4227 spin_unlock_bh(&sta->lock); 4228 4229 if (old) 4230 kfree_rcu(old, rcu_head); 4231 } 4232 4233 void ieee80211_clear_fast_rx(struct sta_info *sta) 4234 { 4235 struct ieee80211_fast_rx *old; 4236 4237 spin_lock_bh(&sta->lock); 4238 old = rcu_dereference_protected(sta->fast_rx, true); 4239 RCU_INIT_POINTER(sta->fast_rx, NULL); 4240 spin_unlock_bh(&sta->lock); 4241 4242 if (old) 4243 kfree_rcu(old, rcu_head); 4244 } 4245 4246 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4247 { 4248 struct ieee80211_local *local = sdata->local; 4249 struct sta_info *sta; 4250 4251 lockdep_assert_held(&local->sta_mtx); 4252 4253 list_for_each_entry(sta, &local->sta_list, list) { 4254 if (sdata != sta->sdata && 4255 (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4256 continue; 4257 ieee80211_check_fast_rx(sta); 4258 } 4259 } 4260 4261 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4262 { 4263 struct ieee80211_local *local = sdata->local; 4264 4265 mutex_lock(&local->sta_mtx); 4266 __ieee80211_check_fast_rx_iface(sdata); 4267 mutex_unlock(&local->sta_mtx); 4268 } 4269 4270 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4271 struct ieee80211_fast_rx *fast_rx) 4272 { 4273 struct sk_buff *skb = rx->skb; 4274 struct ieee80211_hdr *hdr = (void *)skb->data; 4275 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4276 struct sta_info *sta = rx->sta; 4277 int orig_len = skb->len; 4278 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4279 int snap_offs = hdrlen; 4280 struct { 4281 u8 snap[sizeof(rfc1042_header)]; 4282 __be16 proto; 4283 } *payload __aligned(2); 4284 struct { 4285 u8 da[ETH_ALEN]; 4286 u8 sa[ETH_ALEN]; 4287 } addrs __aligned(2); 4288 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; 4289 4290 if (fast_rx->uses_rss) 4291 stats = this_cpu_ptr(sta->pcpu_rx_stats); 4292 4293 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4294 * to a common data structure; drivers can implement that per queue 4295 * but we don't have that information in mac80211 4296 */ 4297 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4298 return false; 4299 4300 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4301 4302 /* If using encryption, we also need to have: 4303 * - PN_VALIDATED: similar, but the implementation is tricky 4304 * - DECRYPTED: necessary for PN_VALIDATED 4305 */ 4306 if (fast_rx->key && 4307 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4308 return false; 4309 4310 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4311 return false; 4312 4313 if (unlikely(ieee80211_is_frag(hdr))) 4314 return false; 4315 4316 /* Since our interface address cannot be multicast, this 4317 * implicitly also rejects multicast frames without the 4318 * explicit check. 4319 * 4320 * We shouldn't get any *data* frames not addressed to us 4321 * (AP mode will accept multicast *management* frames), but 4322 * punting here will make it go through the full checks in 4323 * ieee80211_accept_frame(). 
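 * (Returning false anywhere before the "don't punt to the slowpath"
 * point below just hands the frame back to
 * ieee80211_prepare_and_rx_handle() for regular processing.)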
4324 */ 4325 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4326 return false; 4327 4328 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4329 IEEE80211_FCTL_TODS)) != 4330 fast_rx->expected_ds_bits) 4331 return false; 4332 4333 /* assign the key to drop unencrypted frames (later) 4334 * and strip the IV/MIC if necessary 4335 */ 4336 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4337 /* GCMP header length is the same */ 4338 snap_offs += IEEE80211_CCMP_HDR_LEN; 4339 } 4340 4341 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { 4342 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4343 goto drop; 4344 4345 payload = (void *)(skb->data + snap_offs); 4346 4347 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 4348 return false; 4349 4350 /* Don't handle these here since they require special code. 4351 * Accept AARP and IPX even though they should come with a 4352 * bridge-tunnel header - but if we get them this way then 4353 * there's little point in discarding them. 4354 */ 4355 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 4356 payload->proto == fast_rx->control_port_protocol)) 4357 return false; 4358 } 4359 4360 /* after this point, don't punt to the slowpath! */ 4361 4362 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 4363 pskb_trim(skb, skb->len - fast_rx->icv_len)) 4364 goto drop; 4365 4366 /* statistics part of ieee80211_rx_h_sta_process() */ 4367 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4368 stats->last_signal = status->signal; 4369 if (!fast_rx->uses_rss) 4370 ewma_signal_add(&sta->rx_stats_avg.signal, 4371 -status->signal); 4372 } 4373 4374 if (status->chains) { 4375 int i; 4376 4377 stats->chains = status->chains; 4378 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4379 int signal = status->chain_signal[i]; 4380 4381 if (!(status->chains & BIT(i))) 4382 continue; 4383 4384 stats->chain_signal_last[i] = signal; 4385 if (!fast_rx->uses_rss) 4386 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], 4387 -signal); 4388 } 4389 } 4390 /* end of statistics */ 4391 4392 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 4393 goto drop; 4394 4395 if (status->rx_flags & IEEE80211_RX_AMSDU) { 4396 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 4397 RX_QUEUED) 4398 goto drop; 4399 4400 return true; 4401 } 4402 4403 stats->last_rx = jiffies; 4404 stats->last_rate = sta_stats_encode_rate(status); 4405 4406 stats->fragments++; 4407 stats->packets++; 4408 4409 /* do the header conversion - first grab the addresses */ 4410 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 4411 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 4412 /* remove the SNAP but leave the ethertype */ 4413 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 4414 /* push the addresses in front */ 4415 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 4416 4417 skb->dev = fast_rx->dev; 4418 4419 ieee80211_rx_stats(fast_rx->dev, skb->len); 4420 4421 /* The seqno index has the same property as needed 4422 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4423 * for non-QoS-data frames. Here we know it's a data 4424 * frame, so count MSDUs. 
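 * (For non-QoS data rx->seqno_idx is thus IEEE80211_NUM_TIDS, which is
 * why the per-TID msdu[] counters have IEEE80211_NUM_TIDS + 1 slots.)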
4425 */ 4426 u64_stats_update_begin(&stats->syncp); 4427 stats->msdu[rx->seqno_idx]++; 4428 stats->bytes += orig_len; 4429 u64_stats_update_end(&stats->syncp); 4430 4431 if (fast_rx->internal_forward) { 4432 struct sk_buff *xmit_skb = NULL; 4433 if (is_multicast_ether_addr(addrs.da)) { 4434 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4435 } else if (!ether_addr_equal(addrs.da, addrs.sa) && 4436 sta_info_get(rx->sdata, addrs.da)) { 4437 xmit_skb = skb; 4438 skb = NULL; 4439 } 4440 4441 if (xmit_skb) { 4442 /* 4443 * Send to wireless media and increase priority by 256 4444 * to keep the received priority instead of 4445 * reclassifying the frame (see cfg80211_classify8021d). 4446 */ 4447 xmit_skb->priority += 256; 4448 xmit_skb->protocol = htons(ETH_P_802_3); 4449 skb_reset_network_header(xmit_skb); 4450 skb_reset_mac_header(xmit_skb); 4451 dev_queue_xmit(xmit_skb); 4452 } 4453 4454 if (!skb) 4455 return true; 4456 } 4457 4458 /* deliver to local stack */ 4459 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4460 memset(skb->cb, 0, sizeof(skb->cb)); 4461 if (rx->list) 4462 list_add_tail(&skb->list, rx->list); 4463 else 4464 netif_receive_skb(skb); 4465 4466 return true; 4467 drop: 4468 dev_kfree_skb(skb); 4469 stats->dropped++; 4470 return true; 4471 } 4472 4473 /* 4474 * This function returns whether or not the SKB 4475 * was destined for RX processing or not, which, 4476 * if consume is true, is equivalent to whether 4477 * or not the skb was consumed. 4478 */ 4479 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 4480 struct sk_buff *skb, bool consume) 4481 { 4482 struct ieee80211_local *local = rx->local; 4483 struct ieee80211_sub_if_data *sdata = rx->sdata; 4484 4485 rx->skb = skb; 4486 4487 /* See if we can do fast-rx; if we have to copy we already lost, 4488 * so punt in that case. We should never have to deliver a data 4489 * frame to multiple interfaces anyway. 4490 * 4491 * We skip the ieee80211_accept_frame() call and do the necessary 4492 * checking inside ieee80211_invoke_fast_rx(). 4493 */ 4494 if (consume && rx->sta) { 4495 struct ieee80211_fast_rx *fast_rx; 4496 4497 fast_rx = rcu_dereference(rx->sta->fast_rx); 4498 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 4499 return true; 4500 } 4501 4502 if (!ieee80211_accept_frame(rx)) 4503 return false; 4504 4505 if (!consume) { 4506 skb = skb_copy(skb, GFP_ATOMIC); 4507 if (!skb) { 4508 if (net_ratelimit()) 4509 wiphy_debug(local->hw.wiphy, 4510 "failed to copy skb for %s\n", 4511 sdata->name); 4512 return true; 4513 } 4514 4515 rx->skb = skb; 4516 } 4517 4518 ieee80211_invoke_rx_handlers(rx); 4519 return true; 4520 } 4521 4522 /* 4523 * This is the actual Rx frames handler. as it belongs to Rx path it must 4524 * be called with rcu_read_lock protection. 
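 * It looks up the station and/or interface(s) the frame may be
 * destined for and runs the RX handlers for each candidate, copying
 * the skb when more than one receiver needs it; only the last one
 * consumes the original.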
4525 */ 4526 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 4527 struct ieee80211_sta *pubsta, 4528 struct sk_buff *skb, 4529 struct list_head *list) 4530 { 4531 struct ieee80211_local *local = hw_to_local(hw); 4532 struct ieee80211_sub_if_data *sdata; 4533 struct ieee80211_hdr *hdr; 4534 __le16 fc; 4535 struct ieee80211_rx_data rx; 4536 struct ieee80211_sub_if_data *prev; 4537 struct rhlist_head *tmp; 4538 int err = 0; 4539 4540 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 4541 memset(&rx, 0, sizeof(rx)); 4542 rx.skb = skb; 4543 rx.local = local; 4544 rx.list = list; 4545 4546 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 4547 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 4548 4549 if (ieee80211_is_mgmt(fc)) { 4550 /* drop frame if too short for header */ 4551 if (skb->len < ieee80211_hdrlen(fc)) 4552 err = -ENOBUFS; 4553 else 4554 err = skb_linearize(skb); 4555 } else { 4556 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 4557 } 4558 4559 if (err) { 4560 dev_kfree_skb(skb); 4561 return; 4562 } 4563 4564 hdr = (struct ieee80211_hdr *)skb->data; 4565 ieee80211_parse_qos(&rx); 4566 ieee80211_verify_alignment(&rx); 4567 4568 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 4569 ieee80211_is_beacon(hdr->frame_control) || 4570 ieee80211_is_s1g_beacon(hdr->frame_control))) 4571 ieee80211_scan_rx(local, skb); 4572 4573 if (ieee80211_is_data(fc)) { 4574 struct sta_info *sta, *prev_sta; 4575 4576 if (pubsta) { 4577 rx.sta = container_of(pubsta, struct sta_info, sta); 4578 rx.sdata = rx.sta->sdata; 4579 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4580 return; 4581 goto out; 4582 } 4583 4584 prev_sta = NULL; 4585 4586 for_each_sta_info(local, hdr->addr2, sta, tmp) { 4587 if (!prev_sta) { 4588 prev_sta = sta; 4589 continue; 4590 } 4591 4592 rx.sta = prev_sta; 4593 rx.sdata = prev_sta->sdata; 4594 ieee80211_prepare_and_rx_handle(&rx, skb, false); 4595 4596 prev_sta = sta; 4597 } 4598 4599 if (prev_sta) { 4600 rx.sta = prev_sta; 4601 rx.sdata = prev_sta->sdata; 4602 4603 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4604 return; 4605 goto out; 4606 } 4607 } 4608 4609 prev = NULL; 4610 4611 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 4612 if (!ieee80211_sdata_running(sdata)) 4613 continue; 4614 4615 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 4616 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 4617 continue; 4618 4619 /* 4620 * frame is destined for this interface, but if it's 4621 * not also for the previous one we handle that after 4622 * the loop to avoid copying the SKB once too much 4623 */ 4624 4625 if (!prev) { 4626 prev = sdata; 4627 continue; 4628 } 4629 4630 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4631 rx.sdata = prev; 4632 ieee80211_prepare_and_rx_handle(&rx, skb, false); 4633 4634 prev = sdata; 4635 } 4636 4637 if (prev) { 4638 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4639 rx.sdata = prev; 4640 4641 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4642 return; 4643 } 4644 4645 out: 4646 dev_kfree_skb(skb); 4647 } 4648 4649 /* 4650 * This is the receive path handler. It is called by a low level driver when an 4651 * 802.11 MPDU is received from the hardware. 
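 * It must run with BHs disabled (note the softirq_count() check
 * below); ieee80211_rx_napi() is a wrapper that additionally takes the
 * RCU read lock and then delivers the resulting skbs via GRO or
 * netif_receive_skb_list().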
4652 */ 4653 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4654 struct sk_buff *skb, struct list_head *list) 4655 { 4656 struct ieee80211_local *local = hw_to_local(hw); 4657 struct ieee80211_rate *rate = NULL; 4658 struct ieee80211_supported_band *sband; 4659 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4660 4661 WARN_ON_ONCE(softirq_count() == 0); 4662 4663 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 4664 goto drop; 4665 4666 sband = local->hw.wiphy->bands[status->band]; 4667 if (WARN_ON(!sband)) 4668 goto drop; 4669 4670 /* 4671 * If we're suspending, it is possible although not too likely 4672 * that we'd be receiving frames after having already partially 4673 * quiesced the stack. We can't process such frames then since 4674 * that might, for example, cause stations to be added or other 4675 * driver callbacks be invoked. 4676 */ 4677 if (unlikely(local->quiescing || local->suspended)) 4678 goto drop; 4679 4680 /* We might be during a HW reconfig, prevent Rx for the same reason */ 4681 if (unlikely(local->in_reconfig)) 4682 goto drop; 4683 4684 /* 4685 * The same happens when we're not even started, 4686 * but that's worth a warning. 4687 */ 4688 if (WARN_ON(!local->started)) 4689 goto drop; 4690 4691 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 4692 /* 4693 * Validate the rate, unless a PLCP error means that 4694 * we probably can't have a valid rate here anyway. 4695 */ 4696 4697 switch (status->encoding) { 4698 case RX_ENC_HT: 4699 /* 4700 * rate_idx is MCS index, which can be [0-76] 4701 * as documented on: 4702 * 4703 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n 4704 * 4705 * Anything else would be some sort of driver or 4706 * hardware error. The driver should catch hardware 4707 * errors. 4708 */ 4709 if (WARN(status->rate_idx > 76, 4710 "Rate marked as an HT rate but passed " 4711 "status->rate_idx is not " 4712 "an MCS index [0-76]: %d (0x%02x)\n", 4713 status->rate_idx, 4714 status->rate_idx)) 4715 goto drop; 4716 break; 4717 case RX_ENC_VHT: 4718 if (WARN_ONCE(status->rate_idx > 9 || 4719 !status->nss || 4720 status->nss > 8, 4721 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 4722 status->rate_idx, status->nss)) 4723 goto drop; 4724 break; 4725 case RX_ENC_HE: 4726 if (WARN_ONCE(status->rate_idx > 11 || 4727 !status->nss || 4728 status->nss > 8, 4729 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", 4730 status->rate_idx, status->nss)) 4731 goto drop; 4732 break; 4733 default: 4734 WARN_ON_ONCE(1); 4735 fallthrough; 4736 case RX_ENC_LEGACY: 4737 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 4738 goto drop; 4739 rate = &sband->bitrates[status->rate_idx]; 4740 } 4741 } 4742 4743 status->rx_flags = 0; 4744 4745 /* 4746 * Frames with failed FCS/PLCP checksum are not returned, 4747 * all other frames are returned without radiotap header 4748 * if it was previously present. 4749 * Also, frames with less than 16 bytes are dropped. 
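 * (ieee80211_rx_monitor() also hands a radiotap-tagged copy to any
 * active monitor interfaces and returns NULL when the frame should not
 * be processed any further.)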
4750 */ 4751 skb = ieee80211_rx_monitor(local, skb, rate); 4752 if (!skb) 4753 return; 4754 4755 ieee80211_tpt_led_trig_rx(local, 4756 ((struct ieee80211_hdr *)skb->data)->frame_control, 4757 skb->len); 4758 4759 __ieee80211_rx_handle_packet(hw, pubsta, skb, list); 4760 4761 return; 4762 drop: 4763 kfree_skb(skb); 4764 } 4765 EXPORT_SYMBOL(ieee80211_rx_list); 4766 4767 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4768 struct sk_buff *skb, struct napi_struct *napi) 4769 { 4770 struct sk_buff *tmp; 4771 LIST_HEAD(list); 4772 4773 4774 /* 4775 * key references and virtual interfaces are protected using RCU 4776 * and this requires that we are in a read-side RCU section during 4777 * receive processing 4778 */ 4779 rcu_read_lock(); 4780 ieee80211_rx_list(hw, pubsta, skb, &list); 4781 rcu_read_unlock(); 4782 4783 if (!napi) { 4784 netif_receive_skb_list(&list); 4785 return; 4786 } 4787 4788 list_for_each_entry_safe(skb, tmp, &list, list) { 4789 skb_list_del_init(skb); 4790 napi_gro_receive(napi, skb); 4791 } 4792 } 4793 EXPORT_SYMBOL(ieee80211_rx_napi); 4794 4795 /* This is a version of the rx handler that can be called from hard irq 4796 * context. Post the skb on the queue and schedule the tasklet */ 4797 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 4798 { 4799 struct ieee80211_local *local = hw_to_local(hw); 4800 4801 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 4802 4803 skb->pkt_type = IEEE80211_RX_MSG; 4804 skb_queue_tail(&local->skb_queue, skb); 4805 tasklet_schedule(&local->tasklet); 4806 } 4807 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 4808
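/*
 * Illustrative sketch only (not taken from any real driver): a low
 * level driver might feed a received MPDU into mac80211 roughly like
 * this from its NAPI poll routine; "my_hw", "my_napi" and the
 * hard-coded values are made-up placeholders.
 *
 *	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(rx_status, 0, sizeof(*rx_status));
 *	rx_status->band = NL80211_BAND_2GHZ;
 *	rx_status->freq = 2437;			// channel 6, in MHz
 *	rx_status->signal = -55;		// dBm, if SIGNAL_DBM is advertised
 *	rx_status->encoding = RX_ENC_LEGACY;
 *	rx_status->rate_idx = 0;		// index into the band's bitrates[]
 *
 *	// in softirq context (e.g. NAPI poll), with the status filled in:
 *	ieee80211_rx_napi(my_hw, NULL, skb, my_napi);
 *
 * From hard-IRQ context the frame would instead be deferred via
 * ieee80211_rx_irqsafe(my_hw, skb), as documented above.
 */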