1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2005-2006, Devicescape Software, Inc. 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright (C) 2018-2021 Intel Corporation 10 */ 11 12 #include <linux/jiffies.h> 13 #include <linux/slab.h> 14 #include <linux/kernel.h> 15 #include <linux/skbuff.h> 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/rcupdate.h> 19 #include <linux/export.h> 20 #include <linux/kcov.h> 21 #include <linux/bitops.h> 22 #include <net/mac80211.h> 23 #include <net/ieee80211_radiotap.h> 24 #include <asm/unaligned.h> 25 26 #include "ieee80211_i.h" 27 #include "driver-ops.h" 28 #include "led.h" 29 #include "mesh.h" 30 #include "wep.h" 31 #include "wpa.h" 32 #include "tkip.h" 33 #include "wme.h" 34 #include "rate.h" 35 36 /* 37 * monitor mode reception 38 * 39 * This function cleans up the SKB, i.e. it removes all the stuff 40 * only useful for monitoring. 41 */ 42 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, 43 unsigned int present_fcs_len, 44 unsigned int rtap_space) 45 { 46 struct ieee80211_hdr *hdr; 47 unsigned int hdrlen; 48 __le16 fc; 49 50 if (present_fcs_len) 51 __pskb_trim(skb, skb->len - present_fcs_len); 52 __pskb_pull(skb, rtap_space); 53 54 hdr = (void *)skb->data; 55 fc = hdr->frame_control; 56 57 /* 58 * Remove the HT-Control field (if present) on management 59 * frames after we've sent the frame to monitoring. We 60 * (currently) don't need it, and don't properly parse 61 * frames with it present, due to the assumption of a 62 * fixed management header length. 
63 */ 64 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc))) 65 return skb; 66 67 hdrlen = ieee80211_hdrlen(fc); 68 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); 69 70 if (!pskb_may_pull(skb, hdrlen)) { 71 dev_kfree_skb(skb); 72 return NULL; 73 } 74 75 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data, 76 hdrlen - IEEE80211_HT_CTL_LEN); 77 __pskb_pull(skb, IEEE80211_HT_CTL_LEN); 78 79 return skb; 80 } 81 82 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, 83 unsigned int rtap_space) 84 { 85 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 86 struct ieee80211_hdr *hdr; 87 88 hdr = (void *)(skb->data + rtap_space); 89 90 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 91 RX_FLAG_FAILED_PLCP_CRC | 92 RX_FLAG_ONLY_MONITOR | 93 RX_FLAG_NO_PSDU)) 94 return true; 95 96 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) 97 return true; 98 99 if (ieee80211_is_ctl(hdr->frame_control) && 100 !ieee80211_is_pspoll(hdr->frame_control) && 101 !ieee80211_is_back_req(hdr->frame_control)) 102 return true; 103 104 return false; 105 } 106 107 static int 108 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, 109 struct ieee80211_rx_status *status, 110 struct sk_buff *skb) 111 { 112 int len; 113 114 /* always present fields */ 115 len = sizeof(struct ieee80211_radiotap_header) + 8; 116 117 /* allocate extra bitmaps */ 118 if (status->chains) 119 len += 4 * hweight8(status->chains); 120 /* vendor presence bitmap */ 121 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) 122 len += 4; 123 124 if (ieee80211_have_rx_timestamp(status)) { 125 len = ALIGN(len, 8); 126 len += 8; 127 } 128 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) 129 len += 1; 130 131 /* antenna field, if we don't have per-chain info */ 132 if (!status->chains) 133 len += 1; 134 135 /* padding for RX_FLAGS if necessary */ 136 len = ALIGN(len, 2); 137 138 if (status->encoding == RX_ENC_HT) /* HT info */ 139 len += 3; 140 141 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 142 len = ALIGN(len, 4); 143 len += 8; 144 } 145 146 if (status->encoding == RX_ENC_VHT) { 147 len = ALIGN(len, 2); 148 len += 12; 149 } 150 151 if (local->hw.radiotap_timestamp.units_pos >= 0) { 152 len = ALIGN(len, 8); 153 len += 12; 154 } 155 156 if (status->encoding == RX_ENC_HE && 157 status->flag & RX_FLAG_RADIOTAP_HE) { 158 len = ALIGN(len, 2); 159 len += 12; 160 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); 161 } 162 163 if (status->encoding == RX_ENC_HE && 164 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 165 len = ALIGN(len, 2); 166 len += 12; 167 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); 168 } 169 170 if (status->flag & RX_FLAG_NO_PSDU) 171 len += 1; 172 173 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 174 len = ALIGN(len, 2); 175 len += 4; 176 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); 177 } 178 179 if (status->chains) { 180 /* antenna and antenna signal fields */ 181 len += 2 * hweight8(status->chains); 182 } 183 184 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 185 struct ieee80211_vendor_radiotap *rtap; 186 int vendor_data_offset = 0; 187 188 /* 189 * The position to look at depends on the existence (or non- 190 * existence) of other elements, so take that into account... 
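	 *
	 * (Drivers prepend their ieee80211_radiotap_he, _he_mu and _lsig
	 * structs, in that order, ahead of the vendor data at the start
	 * of the skb, so skip over whichever of them are flagged as
	 * present.)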
191 */ 192 if (status->flag & RX_FLAG_RADIOTAP_HE) 193 vendor_data_offset += 194 sizeof(struct ieee80211_radiotap_he); 195 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 196 vendor_data_offset += 197 sizeof(struct ieee80211_radiotap_he_mu); 198 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 199 vendor_data_offset += 200 sizeof(struct ieee80211_radiotap_lsig); 201 202 rtap = (void *)&skb->data[vendor_data_offset]; 203 204 /* alignment for fixed 6-byte vendor data header */ 205 len = ALIGN(len, 2); 206 /* vendor data header */ 207 len += 6; 208 if (WARN_ON(rtap->align == 0)) 209 rtap->align = 1; 210 len = ALIGN(len, rtap->align); 211 len += rtap->len + rtap->pad; 212 } 213 214 return len; 215 } 216 217 static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, 218 struct sta_info *sta, 219 struct sk_buff *skb) 220 { 221 skb_queue_tail(&sdata->skb_queue, skb); 222 ieee80211_queue_work(&sdata->local->hw, &sdata->work); 223 if (sta) 224 sta->rx_stats.packets++; 225 } 226 227 static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, 228 struct sta_info *sta, 229 struct sk_buff *skb) 230 { 231 skb->protocol = 0; 232 __ieee80211_queue_skb_to_iface(sdata, sta, skb); 233 } 234 235 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 236 struct sk_buff *skb, 237 int rtap_space) 238 { 239 struct { 240 struct ieee80211_hdr_3addr hdr; 241 u8 category; 242 u8 action_code; 243 } __packed __aligned(2) action; 244 245 if (!sdata) 246 return; 247 248 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 249 250 if (skb->len < rtap_space + sizeof(action) + 251 VHT_MUMIMO_GROUPS_DATA_LEN) 252 return; 253 254 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 255 return; 256 257 skb_copy_bits(skb, rtap_space, &action, sizeof(action)); 258 259 if (!ieee80211_is_action(action.hdr.frame_control)) 260 return; 261 262 if (action.category != WLAN_CATEGORY_VHT) 263 return; 264 265 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 266 return; 267 268 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 269 return; 270 271 skb = skb_copy(skb, GFP_ATOMIC); 272 if (!skb) 273 return; 274 275 ieee80211_queue_skb_to_iface(sdata, NULL, skb); 276 } 277 278 /* 279 * ieee80211_add_rx_radiotap_header - add radiotap header 280 * 281 * add a radiotap header containing all the fields which the hardware provided. 
282 */ 283 static void 284 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 285 struct sk_buff *skb, 286 struct ieee80211_rate *rate, 287 int rtap_len, bool has_fcs) 288 { 289 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 290 struct ieee80211_radiotap_header *rthdr; 291 unsigned char *pos; 292 __le32 *it_present; 293 u32 it_present_val; 294 u16 rx_flags = 0; 295 u16 channel_flags = 0; 296 int mpdulen, chain; 297 unsigned long chains = status->chains; 298 struct ieee80211_vendor_radiotap rtap = {}; 299 struct ieee80211_radiotap_he he = {}; 300 struct ieee80211_radiotap_he_mu he_mu = {}; 301 struct ieee80211_radiotap_lsig lsig = {}; 302 303 if (status->flag & RX_FLAG_RADIOTAP_HE) { 304 he = *(struct ieee80211_radiotap_he *)skb->data; 305 skb_pull(skb, sizeof(he)); 306 WARN_ON_ONCE(status->encoding != RX_ENC_HE); 307 } 308 309 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { 310 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; 311 skb_pull(skb, sizeof(he_mu)); 312 } 313 314 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 315 lsig = *(struct ieee80211_radiotap_lsig *)skb->data; 316 skb_pull(skb, sizeof(lsig)); 317 } 318 319 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 320 rtap = *(struct ieee80211_vendor_radiotap *)skb->data; 321 /* rtap.len and rtap.pad are undone immediately */ 322 skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad); 323 } 324 325 mpdulen = skb->len; 326 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))) 327 mpdulen += FCS_LEN; 328 329 rthdr = skb_push(skb, rtap_len); 330 memset(rthdr, 0, rtap_len - rtap.len - rtap.pad); 331 it_present = &rthdr->it_present; 332 333 /* radiotap header, set always present flags */ 334 rthdr->it_len = cpu_to_le16(rtap_len); 335 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | 336 BIT(IEEE80211_RADIOTAP_CHANNEL) | 337 BIT(IEEE80211_RADIOTAP_RX_FLAGS); 338 339 if (!status->chains) 340 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); 341 342 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 343 it_present_val |= 344 BIT(IEEE80211_RADIOTAP_EXT) | 345 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); 346 put_unaligned_le32(it_present_val, it_present); 347 it_present++; 348 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | 349 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 350 } 351 352 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 353 it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) | 354 BIT(IEEE80211_RADIOTAP_EXT); 355 put_unaligned_le32(it_present_val, it_present); 356 it_present++; 357 it_present_val = rtap.present; 358 } 359 360 put_unaligned_le32(it_present_val, it_present); 361 362 pos = (void *)(it_present + 1); 363 364 /* the order of the following fields is important */ 365 366 /* IEEE80211_RADIOTAP_TSFT */ 367 if (ieee80211_have_rx_timestamp(status)) { 368 /* padding */ 369 while ((pos - (u8 *)rthdr) & 7) 370 *pos++ = 0; 371 put_unaligned_le64( 372 ieee80211_calculate_rx_timestamp(local, status, 373 mpdulen, 0), 374 pos); 375 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 376 pos += 8; 377 } 378 379 /* IEEE80211_RADIOTAP_FLAGS */ 380 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) 381 *pos |= IEEE80211_RADIOTAP_F_FCS; 382 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 383 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 384 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) 385 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 386 pos++; 387 388 /* IEEE80211_RADIOTAP_RATE */ 389 if (!rate || status->encoding != RX_ENC_LEGACY) { 390 /* 391 * Without 
rate information don't add it. If we have, 392 * MCS information is a separate field in radiotap, 393 * added below. The byte here is needed as padding 394 * for the channel though, so initialise it to 0. 395 */ 396 *pos = 0; 397 } else { 398 int shift = 0; 399 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 400 if (status->bw == RATE_INFO_BW_10) 401 shift = 1; 402 else if (status->bw == RATE_INFO_BW_5) 403 shift = 2; 404 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); 405 } 406 pos++; 407 408 /* IEEE80211_RADIOTAP_CHANNEL */ 409 /* TODO: frequency offset in KHz */ 410 put_unaligned_le16(status->freq, pos); 411 pos += 2; 412 if (status->bw == RATE_INFO_BW_10) 413 channel_flags |= IEEE80211_CHAN_HALF; 414 else if (status->bw == RATE_INFO_BW_5) 415 channel_flags |= IEEE80211_CHAN_QUARTER; 416 417 if (status->band == NL80211_BAND_5GHZ || 418 status->band == NL80211_BAND_6GHZ) 419 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; 420 else if (status->encoding != RX_ENC_LEGACY) 421 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 422 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 423 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; 424 else if (rate) 425 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; 426 else 427 channel_flags |= IEEE80211_CHAN_2GHZ; 428 put_unaligned_le16(channel_flags, pos); 429 pos += 2; 430 431 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 432 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && 433 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 434 *pos = status->signal; 435 rthdr->it_present |= 436 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 437 pos++; 438 } 439 440 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 441 442 if (!status->chains) { 443 /* IEEE80211_RADIOTAP_ANTENNA */ 444 *pos = status->antenna; 445 pos++; 446 } 447 448 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 449 450 /* IEEE80211_RADIOTAP_RX_FLAGS */ 451 /* ensure 2 byte alignment for the 2 byte field as required */ 452 if ((pos - (u8 *)rthdr) & 1) 453 *pos++ = 0; 454 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 455 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 456 put_unaligned_le16(rx_flags, pos); 457 pos += 2; 458 459 if (status->encoding == RX_ENC_HT) { 460 unsigned int stbc; 461 462 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 463 *pos++ = local->hw.radiotap_mcs_details; 464 *pos = 0; 465 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 466 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 467 if (status->bw == RATE_INFO_BW_40) 468 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 469 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 470 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; 471 if (status->enc_flags & RX_ENC_FLAG_LDPC) 472 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; 473 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; 474 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; 475 pos++; 476 *pos++ = status->rate_idx; 477 } 478 479 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 480 u16 flags = 0; 481 482 /* ensure 4 byte alignment */ 483 while ((pos - (u8 *)rthdr) & 3) 484 pos++; 485 rthdr->it_present |= 486 cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); 487 put_unaligned_le32(status->ampdu_reference, pos); 488 pos += 4; 489 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) 490 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; 491 if (status->flag & RX_FLAG_AMPDU_IS_LAST) 492 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; 493 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) 494 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; 495 if 
(status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 496 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; 497 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) 498 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; 499 if (status->flag & RX_FLAG_AMPDU_EOF_BIT) 500 flags |= IEEE80211_RADIOTAP_AMPDU_EOF; 501 put_unaligned_le16(flags, pos); 502 pos += 2; 503 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) 504 *pos++ = status->ampdu_delimiter_crc; 505 else 506 *pos++ = 0; 507 *pos++ = 0; 508 } 509 510 if (status->encoding == RX_ENC_VHT) { 511 u16 known = local->hw.radiotap_vht_details; 512 513 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); 514 put_unaligned_le16(known, pos); 515 pos += 2; 516 /* flags */ 517 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 518 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; 519 /* in VHT, STBC is binary */ 520 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) 521 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 522 if (status->enc_flags & RX_ENC_FLAG_BF) 523 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; 524 pos++; 525 /* bandwidth */ 526 switch (status->bw) { 527 case RATE_INFO_BW_80: 528 *pos++ = 4; 529 break; 530 case RATE_INFO_BW_160: 531 *pos++ = 11; 532 break; 533 case RATE_INFO_BW_40: 534 *pos++ = 1; 535 break; 536 default: 537 *pos++ = 0; 538 } 539 /* MCS/NSS */ 540 *pos = (status->rate_idx << 4) | status->nss; 541 pos += 4; 542 /* coding field */ 543 if (status->enc_flags & RX_ENC_FLAG_LDPC) 544 *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; 545 pos++; 546 /* group ID */ 547 pos++; 548 /* partial_aid */ 549 pos += 2; 550 } 551 552 if (local->hw.radiotap_timestamp.units_pos >= 0) { 553 u16 accuracy = 0; 554 u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; 555 556 rthdr->it_present |= 557 cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP); 558 559 /* ensure 8 byte alignment */ 560 while ((pos - (u8 *)rthdr) & 7) 561 pos++; 562 563 put_unaligned_le64(status->device_timestamp, pos); 564 pos += sizeof(u64); 565 566 if (local->hw.radiotap_timestamp.accuracy >= 0) { 567 accuracy = local->hw.radiotap_timestamp.accuracy; 568 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; 569 } 570 put_unaligned_le16(accuracy, pos); 571 pos += sizeof(u16); 572 573 *pos++ = local->hw.radiotap_timestamp.units_pos; 574 *pos++ = flags; 575 } 576 577 if (status->encoding == RX_ENC_HE && 578 status->flag & RX_FLAG_RADIOTAP_HE) { 579 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) 580 581 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { 582 he.data6 |= HE_PREP(DATA6_NSTS, 583 FIELD_GET(RX_ENC_FLAG_STBC_MASK, 584 status->enc_flags)); 585 he.data3 |= HE_PREP(DATA3_STBC, 1); 586 } else { 587 he.data6 |= HE_PREP(DATA6_NSTS, status->nss); 588 } 589 590 #define CHECK_GI(s) \ 591 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ 592 (int)NL80211_RATE_INFO_HE_GI_##s) 593 594 CHECK_GI(0_8); 595 CHECK_GI(1_6); 596 CHECK_GI(3_2); 597 598 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); 599 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); 600 he.data3 |= HE_PREP(DATA3_CODING, 601 !!(status->enc_flags & RX_ENC_FLAG_LDPC)); 602 603 he.data5 |= HE_PREP(DATA5_GI, status->he_gi); 604 605 switch (status->bw) { 606 case RATE_INFO_BW_20: 607 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 608 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); 609 break; 610 case RATE_INFO_BW_40: 611 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 612 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); 613 break; 614 case RATE_INFO_BW_80: 615 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 616 
IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); 617 break; 618 case RATE_INFO_BW_160: 619 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 620 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); 621 break; 622 case RATE_INFO_BW_HE_RU: 623 #define CHECK_RU_ALLOC(s) \ 624 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ 625 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) 626 627 CHECK_RU_ALLOC(26); 628 CHECK_RU_ALLOC(52); 629 CHECK_RU_ALLOC(106); 630 CHECK_RU_ALLOC(242); 631 CHECK_RU_ALLOC(484); 632 CHECK_RU_ALLOC(996); 633 CHECK_RU_ALLOC(2x996); 634 635 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 636 status->he_ru + 4); 637 break; 638 default: 639 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); 640 } 641 642 /* ensure 2 byte alignment */ 643 while ((pos - (u8 *)rthdr) & 1) 644 pos++; 645 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE); 646 memcpy(pos, &he, sizeof(he)); 647 pos += sizeof(he); 648 } 649 650 if (status->encoding == RX_ENC_HE && 651 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 652 /* ensure 2 byte alignment */ 653 while ((pos - (u8 *)rthdr) & 1) 654 pos++; 655 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU); 656 memcpy(pos, &he_mu, sizeof(he_mu)); 657 pos += sizeof(he_mu); 658 } 659 660 if (status->flag & RX_FLAG_NO_PSDU) { 661 rthdr->it_present |= 662 cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU); 663 *pos++ = status->zero_length_psdu_type; 664 } 665 666 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 667 /* ensure 2 byte alignment */ 668 while ((pos - (u8 *)rthdr) & 1) 669 pos++; 670 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG); 671 memcpy(pos, &lsig, sizeof(lsig)); 672 pos += sizeof(lsig); 673 } 674 675 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 676 *pos++ = status->chain_signal[chain]; 677 *pos++ = chain; 678 } 679 680 if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { 681 /* ensure 2 byte alignment for the vendor field as required */ 682 if ((pos - (u8 *)rthdr) & 1) 683 *pos++ = 0; 684 *pos++ = rtap.oui[0]; 685 *pos++ = rtap.oui[1]; 686 *pos++ = rtap.oui[2]; 687 *pos++ = rtap.subns; 688 put_unaligned_le16(rtap.len, pos); 689 pos += 2; 690 /* align the actual payload as requested */ 691 while ((pos - (u8 *)rthdr) & (rtap.align - 1)) 692 *pos++ = 0; 693 /* data (and possible padding) already follows */ 694 } 695 } 696 697 static struct sk_buff * 698 ieee80211_make_monitor_skb(struct ieee80211_local *local, 699 struct sk_buff **origskb, 700 struct ieee80211_rate *rate, 701 int rtap_space, bool use_origskb) 702 { 703 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); 704 int rt_hdrlen, needed_headroom; 705 struct sk_buff *skb; 706 707 /* room for the radiotap header based on driver features */ 708 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); 709 needed_headroom = rt_hdrlen - rtap_space; 710 711 if (use_origskb) { 712 /* only need to expand headroom if necessary */ 713 skb = *origskb; 714 *origskb = NULL; 715 716 /* 717 * This shouldn't trigger often because most devices have an 718 * RX header they pull before we get here, and that should 719 * be big enough for our radiotap information. We should 720 * probably export the length to drivers so that we can have 721 * them allocate enough headroom to start with. 
722 */ 723 if (skb_headroom(skb) < needed_headroom && 724 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 725 dev_kfree_skb(skb); 726 return NULL; 727 } 728 } else { 729 /* 730 * Need to make a copy and possibly remove radiotap header 731 * and FCS from the original. 732 */ 733 skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC); 734 735 if (!skb) 736 return NULL; 737 } 738 739 /* prepend radiotap information */ 740 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); 741 742 skb_reset_mac_header(skb); 743 skb->ip_summed = CHECKSUM_UNNECESSARY; 744 skb->pkt_type = PACKET_OTHERHOST; 745 skb->protocol = htons(ETH_P_802_2); 746 747 return skb; 748 } 749 750 /* 751 * This function copies a received frame to all monitor interfaces and 752 * returns a cleaned-up SKB that no longer includes the FCS nor the 753 * radiotap header the driver might have added. 754 */ 755 static struct sk_buff * 756 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 757 struct ieee80211_rate *rate) 758 { 759 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 760 struct ieee80211_sub_if_data *sdata; 761 struct sk_buff *monskb = NULL; 762 int present_fcs_len = 0; 763 unsigned int rtap_space = 0; 764 struct ieee80211_sub_if_data *monitor_sdata = 765 rcu_dereference(local->monitor_sdata); 766 bool only_monitor = false; 767 unsigned int min_head_len; 768 769 if (status->flag & RX_FLAG_RADIOTAP_HE) 770 rtap_space += sizeof(struct ieee80211_radiotap_he); 771 772 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 773 rtap_space += sizeof(struct ieee80211_radiotap_he_mu); 774 775 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 776 rtap_space += sizeof(struct ieee80211_radiotap_lsig); 777 778 if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) { 779 struct ieee80211_vendor_radiotap *rtap = 780 (void *)(origskb->data + rtap_space); 781 782 rtap_space += sizeof(*rtap) + rtap->len + rtap->pad; 783 } 784 785 min_head_len = rtap_space; 786 787 /* 788 * First, we may need to make a copy of the skb because 789 * (1) we need to modify it for radiotap (if not present), and 790 * (2) the other RX handlers will modify the skb we got. 791 * 792 * We don't need to, of course, if we aren't going to return 793 * the SKB because it has a bad FCS/PLCP checksum. 
794 */ 795 796 if (!(status->flag & RX_FLAG_NO_PSDU)) { 797 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { 798 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { 799 /* driver bug */ 800 WARN_ON(1); 801 dev_kfree_skb(origskb); 802 return NULL; 803 } 804 present_fcs_len = FCS_LEN; 805 } 806 807 /* also consider the hdr->frame_control */ 808 min_head_len += 2; 809 } 810 811 /* ensure that the expected data elements are in skb head */ 812 if (!pskb_may_pull(origskb, min_head_len)) { 813 dev_kfree_skb(origskb); 814 return NULL; 815 } 816 817 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); 818 819 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { 820 if (only_monitor) { 821 dev_kfree_skb(origskb); 822 return NULL; 823 } 824 825 return ieee80211_clean_skb(origskb, present_fcs_len, 826 rtap_space); 827 } 828 829 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); 830 831 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { 832 bool last_monitor = list_is_last(&sdata->u.mntr.list, 833 &local->mon_list); 834 835 if (!monskb) 836 monskb = ieee80211_make_monitor_skb(local, &origskb, 837 rate, rtap_space, 838 only_monitor && 839 last_monitor); 840 841 if (monskb) { 842 struct sk_buff *skb; 843 844 if (last_monitor) { 845 skb = monskb; 846 monskb = NULL; 847 } else { 848 skb = skb_clone(monskb, GFP_ATOMIC); 849 } 850 851 if (skb) { 852 skb->dev = sdata->dev; 853 dev_sw_netstats_rx_add(skb->dev, skb->len); 854 netif_receive_skb(skb); 855 } 856 } 857 858 if (last_monitor) 859 break; 860 } 861 862 /* this happens if last_monitor was erroneously false */ 863 dev_kfree_skb(monskb); 864 865 /* ditto */ 866 if (!origskb) 867 return NULL; 868 869 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space); 870 } 871 872 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 873 { 874 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 875 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 876 int tid, seqno_idx, security_idx; 877 878 /* does the frame have a qos control field? */ 879 if (ieee80211_is_data_qos(hdr->frame_control)) { 880 u8 *qc = ieee80211_get_qos_ctl(hdr); 881 /* frame has qos control */ 882 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 883 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 884 status->rx_flags |= IEEE80211_RX_AMSDU; 885 886 seqno_idx = tid; 887 security_idx = tid; 888 } else { 889 /* 890 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 891 * 892 * Sequence numbers for management frames, QoS data 893 * frames with a broadcast/multicast address in the 894 * Address 1 field, and all non-QoS data frames sent 895 * by QoS STAs are assigned using an additional single 896 * modulo-4096 counter, [...] 897 * 898 * We also use that counter for non-QoS STAs. 899 */ 900 seqno_idx = IEEE80211_NUM_TIDS; 901 security_idx = 0; 902 if (ieee80211_is_mgmt(hdr->frame_control)) 903 security_idx = IEEE80211_NUM_TIDS; 904 tid = 0; 905 } 906 907 rx->seqno_idx = seqno_idx; 908 rx->security_idx = security_idx; 909 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 910 * For now, set skb->priority to 0 for other cases. */ 911 rx->skb->priority = (tid > 7) ? 0 : tid; 912 } 913 914 /** 915 * DOC: Packet alignment 916 * 917 * Drivers always need to pass packets that are aligned to two-byte boundaries 918 * to the stack. 
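 *
 * (mac80211 itself reads 16-bit header fields such as frame_control
 * straight out of skb->data, and ieee80211_verify_alignment() below
 * warns, in verbose-debug builds, when a driver passes in an skb whose
 * data address is odd.)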
919 * 920 * Additionally, should, if possible, align the payload data in a way that 921 * guarantees that the contained IP header is aligned to a four-byte 922 * boundary. In the case of regular frames, this simply means aligning the 923 * payload to a four-byte boundary (because either the IP header is directly 924 * contained, or IV/RFC1042 headers that have a length divisible by four are 925 * in front of it). If the payload data is not properly aligned and the 926 * architecture doesn't support efficient unaligned operations, mac80211 927 * will align the data. 928 * 929 * With A-MSDU frames, however, the payload data address must yield two modulo 930 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 931 * push the IP header further back to a multiple of four again. Thankfully, the 932 * specs were sane enough this time around to require padding each A-MSDU 933 * subframe to a length that is a multiple of four. 934 * 935 * Padding like Atheros hardware adds which is between the 802.11 header and 936 * the payload is not supported, the driver is required to move the 802.11 937 * header to be directly in front of the payload in that case. 938 */ 939 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 940 { 941 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 942 WARN_ON_ONCE((unsigned long)rx->skb->data & 1); 943 #endif 944 } 945 946 947 /* rx handlers */ 948 949 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) 950 { 951 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 952 953 if (is_multicast_ether_addr(hdr->addr1)) 954 return 0; 955 956 return ieee80211_is_robust_mgmt_frame(skb); 957 } 958 959 960 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) 961 { 962 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 963 964 if (!is_multicast_ether_addr(hdr->addr1)) 965 return 0; 966 967 return ieee80211_is_robust_mgmt_frame(skb); 968 } 969 970 971 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ 972 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) 973 { 974 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; 975 struct ieee80211_mmie *mmie; 976 struct ieee80211_mmie_16 *mmie16; 977 978 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) 979 return -1; 980 981 if (!ieee80211_is_robust_mgmt_frame(skb) && 982 !ieee80211_is_beacon(hdr->frame_control)) 983 return -1; /* not a robust management frame */ 984 985 mmie = (struct ieee80211_mmie *) 986 (skb->data + skb->len - sizeof(*mmie)); 987 if (mmie->element_id == WLAN_EID_MMIE && 988 mmie->length == sizeof(*mmie) - 2) 989 return le16_to_cpu(mmie->key_id); 990 991 mmie16 = (struct ieee80211_mmie_16 *) 992 (skb->data + skb->len - sizeof(*mmie16)); 993 if (skb->len >= 24 + sizeof(*mmie16) && 994 mmie16->element_id == WLAN_EID_MMIE && 995 mmie16->length == sizeof(*mmie16) - 2) 996 return le16_to_cpu(mmie16->key_id); 997 998 return -1; 999 } 1000 1001 static int ieee80211_get_keyid(struct sk_buff *skb, 1002 const struct ieee80211_cipher_scheme *cs) 1003 { 1004 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1005 __le16 fc; 1006 int hdrlen; 1007 int minlen; 1008 u8 key_idx_off; 1009 u8 key_idx_shift; 1010 u8 keyid; 1011 1012 fc = hdr->frame_control; 1013 hdrlen = ieee80211_hdrlen(fc); 1014 1015 if (cs) { 1016 minlen = hdrlen + cs->hdr_len; 1017 key_idx_off = hdrlen + cs->key_idx_off; 1018 key_idx_shift = cs->key_idx_shift; 1019 } else { 1020 /* WEP, TKIP, CCMP and GCMP 
*/ 1021 minlen = hdrlen + IEEE80211_WEP_IV_LEN; 1022 key_idx_off = hdrlen + 3; 1023 key_idx_shift = 6; 1024 } 1025 1026 if (unlikely(skb->len < minlen)) 1027 return -EINVAL; 1028 1029 skb_copy_bits(skb, key_idx_off, &keyid, 1); 1030 1031 if (cs) 1032 keyid &= cs->key_idx_mask; 1033 keyid >>= key_idx_shift; 1034 1035 /* cs could use more than the usual two bits for the keyid */ 1036 if (unlikely(keyid >= NUM_DEFAULT_KEYS)) 1037 return -EINVAL; 1038 1039 return keyid; 1040 } 1041 1042 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 1043 { 1044 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1045 char *dev_addr = rx->sdata->vif.addr; 1046 1047 if (ieee80211_is_data(hdr->frame_control)) { 1048 if (is_multicast_ether_addr(hdr->addr1)) { 1049 if (ieee80211_has_tods(hdr->frame_control) || 1050 !ieee80211_has_fromds(hdr->frame_control)) 1051 return RX_DROP_MONITOR; 1052 if (ether_addr_equal(hdr->addr3, dev_addr)) 1053 return RX_DROP_MONITOR; 1054 } else { 1055 if (!ieee80211_has_a4(hdr->frame_control)) 1056 return RX_DROP_MONITOR; 1057 if (ether_addr_equal(hdr->addr4, dev_addr)) 1058 return RX_DROP_MONITOR; 1059 } 1060 } 1061 1062 /* If there is not an established peer link and this is not a peer link 1063 * establisment frame, beacon or probe, drop the frame. 1064 */ 1065 1066 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { 1067 struct ieee80211_mgmt *mgmt; 1068 1069 if (!ieee80211_is_mgmt(hdr->frame_control)) 1070 return RX_DROP_MONITOR; 1071 1072 if (ieee80211_is_action(hdr->frame_control)) { 1073 u8 category; 1074 1075 /* make sure category field is present */ 1076 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 1077 return RX_DROP_MONITOR; 1078 1079 mgmt = (struct ieee80211_mgmt *)hdr; 1080 category = mgmt->u.action.category; 1081 if (category != WLAN_CATEGORY_MESH_ACTION && 1082 category != WLAN_CATEGORY_SELF_PROTECTED) 1083 return RX_DROP_MONITOR; 1084 return RX_CONTINUE; 1085 } 1086 1087 if (ieee80211_is_probe_req(hdr->frame_control) || 1088 ieee80211_is_probe_resp(hdr->frame_control) || 1089 ieee80211_is_beacon(hdr->frame_control) || 1090 ieee80211_is_auth(hdr->frame_control)) 1091 return RX_CONTINUE; 1092 1093 return RX_DROP_MONITOR; 1094 } 1095 1096 return RX_CONTINUE; 1097 } 1098 1099 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, 1100 int index) 1101 { 1102 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index]; 1103 struct sk_buff *tail = skb_peek_tail(frames); 1104 struct ieee80211_rx_status *status; 1105 1106 if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) 1107 return true; 1108 1109 if (!tail) 1110 return false; 1111 1112 status = IEEE80211_SKB_RXCB(tail); 1113 if (status->flag & RX_FLAG_AMSDU_MORE) 1114 return false; 1115 1116 return true; 1117 } 1118 1119 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 1120 struct tid_ampdu_rx *tid_agg_rx, 1121 int index, 1122 struct sk_buff_head *frames) 1123 { 1124 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; 1125 struct sk_buff *skb; 1126 struct ieee80211_rx_status *status; 1127 1128 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1129 1130 if (skb_queue_empty(skb_list)) 1131 goto no_frame; 1132 1133 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1134 __skb_queue_purge(skb_list); 1135 goto no_frame; 1136 } 1137 1138 /* release frames from the reorder ring buffer */ 1139 tid_agg_rx->stored_mpdu_num--; 1140 while ((skb = __skb_dequeue(skb_list))) { 1141 status = 
IEEE80211_SKB_RXCB(skb); 1142 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 1143 __skb_queue_tail(frames, skb); 1144 } 1145 1146 no_frame: 1147 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 1148 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1149 } 1150 1151 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 1152 struct tid_ampdu_rx *tid_agg_rx, 1153 u16 head_seq_num, 1154 struct sk_buff_head *frames) 1155 { 1156 int index; 1157 1158 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1159 1160 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { 1161 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1162 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1163 frames); 1164 } 1165 } 1166 1167 /* 1168 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 1169 * the skb was added to the buffer longer than this time ago, the earlier 1170 * frames that have not yet been received are assumed to be lost and the skb 1171 * can be released for processing. This may also release other skb's from the 1172 * reorder buffer if there are no additional gaps between the frames. 1173 * 1174 * Callers must hold tid_agg_rx->reorder_lock. 1175 */ 1176 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 1177 1178 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 1179 struct tid_ampdu_rx *tid_agg_rx, 1180 struct sk_buff_head *frames) 1181 { 1182 int index, i, j; 1183 1184 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1185 1186 /* release the buffer until next missing frame */ 1187 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1188 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && 1189 tid_agg_rx->stored_mpdu_num) { 1190 /* 1191 * No buffers ready to be released, but check whether any 1192 * frames in the reorder buffer have timed out. 1193 */ 1194 int skipped = 1; 1195 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 1196 j = (j + 1) % tid_agg_rx->buf_size) { 1197 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { 1198 skipped++; 1199 continue; 1200 } 1201 if (skipped && 1202 !time_after(jiffies, tid_agg_rx->reorder_time[j] + 1203 HT_RX_REORDER_BUF_TIMEOUT)) 1204 goto set_release_timer; 1205 1206 /* don't leave incomplete A-MSDUs around */ 1207 for (i = (index + 1) % tid_agg_rx->buf_size; i != j; 1208 i = (i + 1) % tid_agg_rx->buf_size) 1209 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); 1210 1211 ht_dbg_ratelimited(sdata, 1212 "release an RX reorder frame due to timeout on earlier frames\n"); 1213 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, 1214 frames); 1215 1216 /* 1217 * Increment the head seq# also for the skipped slots. 
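			 * The frames that would have filled those slots are
			 * assumed lost by now, so the reorder window simply
			 * moves past them.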
1218 */ 1219 tid_agg_rx->head_seq_num = 1220 (tid_agg_rx->head_seq_num + 1221 skipped) & IEEE80211_SN_MASK; 1222 skipped = 0; 1223 } 1224 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1225 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1226 frames); 1227 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1228 } 1229 1230 if (tid_agg_rx->stored_mpdu_num) { 1231 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1232 1233 for (; j != (index - 1) % tid_agg_rx->buf_size; 1234 j = (j + 1) % tid_agg_rx->buf_size) { 1235 if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) 1236 break; 1237 } 1238 1239 set_release_timer: 1240 1241 if (!tid_agg_rx->removed) 1242 mod_timer(&tid_agg_rx->reorder_timer, 1243 tid_agg_rx->reorder_time[j] + 1 + 1244 HT_RX_REORDER_BUF_TIMEOUT); 1245 } else { 1246 del_timer(&tid_agg_rx->reorder_timer); 1247 } 1248 } 1249 1250 /* 1251 * As this function belongs to the RX path it must be under 1252 * rcu_read_lock protection. It returns false if the frame 1253 * can be processed immediately, true if it was consumed. 1254 */ 1255 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, 1256 struct tid_ampdu_rx *tid_agg_rx, 1257 struct sk_buff *skb, 1258 struct sk_buff_head *frames) 1259 { 1260 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1261 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1262 u16 sc = le16_to_cpu(hdr->seq_ctrl); 1263 u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; 1264 u16 head_seq_num, buf_size; 1265 int index; 1266 bool ret = true; 1267 1268 spin_lock(&tid_agg_rx->reorder_lock); 1269 1270 /* 1271 * Offloaded BA sessions have no known starting sequence number so pick 1272 * one from first Rxed frame for this tid after BA was started. 1273 */ 1274 if (unlikely(tid_agg_rx->auto_seq)) { 1275 tid_agg_rx->auto_seq = false; 1276 tid_agg_rx->ssn = mpdu_seq_num; 1277 tid_agg_rx->head_seq_num = mpdu_seq_num; 1278 } 1279 1280 buf_size = tid_agg_rx->buf_size; 1281 head_seq_num = tid_agg_rx->head_seq_num; 1282 1283 /* 1284 * If the current MPDU's SN is smaller than the SSN, it shouldn't 1285 * be reordered. 1286 */ 1287 if (unlikely(!tid_agg_rx->started)) { 1288 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 1289 ret = false; 1290 goto out; 1291 } 1292 tid_agg_rx->started = true; 1293 } 1294 1295 /* frame with out of date sequence number */ 1296 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { 1297 dev_kfree_skb(skb); 1298 goto out; 1299 } 1300 1301 /* 1302 * If frame the sequence number exceeds our buffering window 1303 * size release some previous frames to make room for this one. 1304 */ 1305 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) { 1306 head_seq_num = ieee80211_sn_inc( 1307 ieee80211_sn_sub(mpdu_seq_num, buf_size)); 1308 /* release stored frames up to new head to stack */ 1309 ieee80211_release_reorder_frames(sdata, tid_agg_rx, 1310 head_seq_num, frames); 1311 } 1312 1313 /* Now the new frame is always in the range of the reordering buffer */ 1314 1315 index = mpdu_seq_num % tid_agg_rx->buf_size; 1316 1317 /* check if we already stored this frame */ 1318 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1319 dev_kfree_skb(skb); 1320 goto out; 1321 } 1322 1323 /* 1324 * If the current MPDU is in the right order and nothing else 1325 * is stored we can process it directly, no need to buffer it. 1326 * If it is first but there's something stored, we may be able 1327 * to release frames after this one. 
1328 */ 1329 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 1330 tid_agg_rx->stored_mpdu_num == 0) { 1331 if (!(status->flag & RX_FLAG_AMSDU_MORE)) 1332 tid_agg_rx->head_seq_num = 1333 ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1334 ret = false; 1335 goto out; 1336 } 1337 1338 /* put the frame in the reordering buffer */ 1339 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb); 1340 if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1341 tid_agg_rx->reorder_time[index] = jiffies; 1342 tid_agg_rx->stored_mpdu_num++; 1343 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames); 1344 } 1345 1346 out: 1347 spin_unlock(&tid_agg_rx->reorder_lock); 1348 return ret; 1349 } 1350 1351 /* 1352 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns 1353 * true if the MPDU was buffered, false if it should be processed. 1354 */ 1355 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 1356 struct sk_buff_head *frames) 1357 { 1358 struct sk_buff *skb = rx->skb; 1359 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1360 struct sta_info *sta = rx->sta; 1361 struct tid_ampdu_rx *tid_agg_rx; 1362 u16 sc; 1363 u8 tid, ack_policy; 1364 1365 if (!ieee80211_is_data_qos(hdr->frame_control) || 1366 is_multicast_ether_addr(hdr->addr1)) 1367 goto dont_reorder; 1368 1369 /* 1370 * filter the QoS data rx stream according to 1371 * STA/TID and check if this STA/TID is on aggregation 1372 */ 1373 1374 if (!sta) 1375 goto dont_reorder; 1376 1377 ack_policy = *ieee80211_get_qos_ctl(hdr) & 1378 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 1379 tid = ieee80211_get_tid(hdr); 1380 1381 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 1382 if (!tid_agg_rx) { 1383 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1384 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 1385 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 1386 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 1387 WLAN_BACK_RECIPIENT, 1388 WLAN_REASON_QSTA_REQUIRE_SETUP); 1389 goto dont_reorder; 1390 } 1391 1392 /* qos null data frames are excluded */ 1393 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 1394 goto dont_reorder; 1395 1396 /* not part of a BA session */ 1397 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1398 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) 1399 goto dont_reorder; 1400 1401 /* new, potentially un-ordered, ampdu frame - process it */ 1402 1403 /* reset session timer */ 1404 if (tid_agg_rx->timeout) 1405 tid_agg_rx->last_rx = jiffies; 1406 1407 /* if this mpdu is fragmented - terminate rx aggregation session */ 1408 sc = le16_to_cpu(hdr->seq_ctrl); 1409 if (sc & IEEE80211_SCTL_FRAG) { 1410 ieee80211_queue_skb_to_iface(rx->sdata, NULL, skb); 1411 return; 1412 } 1413 1414 /* 1415 * No locking needed -- we will only ever process one 1416 * RX packet at a time, and thus own tid_agg_rx. All 1417 * other code manipulating it needs to (and does) make 1418 * sure that we cannot get to it any more before doing 1419 * anything with it. 
1420 */ 1421 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, 1422 frames)) 1423 return; 1424 1425 dont_reorder: 1426 __skb_queue_tail(frames, skb); 1427 } 1428 1429 static ieee80211_rx_result debug_noinline 1430 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) 1431 { 1432 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1433 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1434 1435 if (status->flag & RX_FLAG_DUP_VALIDATED) 1436 return RX_CONTINUE; 1437 1438 /* 1439 * Drop duplicate 802.11 retransmissions 1440 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") 1441 */ 1442 1443 if (rx->skb->len < 24) 1444 return RX_CONTINUE; 1445 1446 if (ieee80211_is_ctl(hdr->frame_control) || 1447 ieee80211_is_any_nullfunc(hdr->frame_control) || 1448 is_multicast_ether_addr(hdr->addr1)) 1449 return RX_CONTINUE; 1450 1451 if (!rx->sta) 1452 return RX_CONTINUE; 1453 1454 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 1455 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { 1456 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); 1457 rx->sta->rx_stats.num_duplicates++; 1458 return RX_DROP_UNUSABLE; 1459 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1460 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 1461 } 1462 1463 return RX_CONTINUE; 1464 } 1465 1466 static ieee80211_rx_result debug_noinline 1467 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 1468 { 1469 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1470 1471 /* Drop disallowed frame classes based on STA auth/assoc state; 1472 * IEEE 802.11, Chap 5.5. 1473 * 1474 * mac80211 filters only based on association state, i.e. it drops 1475 * Class 3 frames from not associated stations. hostapd sends 1476 * deauth/disassoc frames when needed. In addition, hostapd is 1477 * responsible for filtering on both auth and assoc states. 
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		struct ieee80211_txq *txq = sta->sta.txq[tid];

		ieee80211_unschedule_txq(&local->hw, txq, false);

		if (txq_has_queue(txq))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so
that the TX path won't start TX'ing new frames 1600 * directly ... In the case that the driver flag isn't 1601 * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1602 */ 1603 clear_sta_flag(sta, WLAN_STA_PS_STA); 1604 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1605 sta->sta.addr, sta->sta.aid); 1606 return; 1607 } 1608 1609 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1610 clear_sta_flag(sta, WLAN_STA_PS_STA); 1611 ieee80211_sta_ps_deliver_wakeup(sta); 1612 } 1613 1614 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) 1615 { 1616 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1617 bool in_ps; 1618 1619 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); 1620 1621 /* Don't let the same PS state be set twice */ 1622 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); 1623 if ((start && in_ps) || (!start && !in_ps)) 1624 return -EINVAL; 1625 1626 if (start) 1627 sta_ps_start(sta); 1628 else 1629 sta_ps_end(sta); 1630 1631 return 0; 1632 } 1633 EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1634 1635 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) 1636 { 1637 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1638 1639 if (test_sta_flag(sta, WLAN_STA_SP)) 1640 return; 1641 1642 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1643 ieee80211_sta_ps_deliver_poll_response(sta); 1644 else 1645 set_sta_flag(sta, WLAN_STA_PSPOLL); 1646 } 1647 EXPORT_SYMBOL(ieee80211_sta_pspoll); 1648 1649 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) 1650 { 1651 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1652 int ac = ieee80211_ac_from_tid(tid); 1653 1654 /* 1655 * If this AC is not trigger-enabled do nothing unless the 1656 * driver is calling us after it already checked. 1657 * 1658 * NB: This could/should check a separate bitmap of trigger- 1659 * enabled queues, but for now we only implement uAPSD w/o 1660 * TSPEC changes to the ACs, so they're always the same. 1661 */ 1662 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) && 1663 tid != IEEE80211_NUM_TIDS) 1664 return; 1665 1666 /* if we are in a service period, do nothing */ 1667 if (test_sta_flag(sta, WLAN_STA_SP)) 1668 return; 1669 1670 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1671 ieee80211_sta_ps_deliver_uapsd(sta); 1672 else 1673 set_sta_flag(sta, WLAN_STA_UAPSD); 1674 } 1675 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); 1676 1677 static ieee80211_rx_result debug_noinline 1678 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) 1679 { 1680 struct ieee80211_sub_if_data *sdata = rx->sdata; 1681 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 1682 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1683 1684 if (!rx->sta) 1685 return RX_CONTINUE; 1686 1687 if (sdata->vif.type != NL80211_IFTYPE_AP && 1688 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 1689 return RX_CONTINUE; 1690 1691 /* 1692 * The device handles station powersave, so don't do anything about 1693 * uAPSD and PS-Poll frames (the latter shouldn't even come up from 1694 * it to mac80211 since they're handled.) 1695 */ 1696 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS)) 1697 return RX_CONTINUE; 1698 1699 /* 1700 * Don't do anything if the station isn't already asleep. In 1701 * the uAPSD case, the station will probably be marked asleep, 1702 * in the PS-Poll case the station must be confused ... 
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
		   !is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx when they are found to
		 * match the current local configuration when processed.
1766 */ 1767 sta->rx_stats.last_rx = jiffies; 1768 if (ieee80211_is_data(hdr->frame_control)) 1769 sta->rx_stats.last_rate = sta_stats_encode_rate(status); 1770 } 1771 1772 sta->rx_stats.fragments++; 1773 1774 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 1775 sta->rx_stats.bytes += rx->skb->len; 1776 u64_stats_update_end(&rx->sta->rx_stats.syncp); 1777 1778 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 1779 sta->rx_stats.last_signal = status->signal; 1780 ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); 1781 } 1782 1783 if (status->chains) { 1784 sta->rx_stats.chains = status->chains; 1785 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 1786 int signal = status->chain_signal[i]; 1787 1788 if (!(status->chains & BIT(i))) 1789 continue; 1790 1791 sta->rx_stats.chain_signal_last[i] = signal; 1792 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], 1793 -signal); 1794 } 1795 } 1796 1797 if (ieee80211_is_s1g_beacon(hdr->frame_control)) 1798 return RX_CONTINUE; 1799 1800 /* 1801 * Change STA power saving mode only at the end of a frame 1802 * exchange sequence, and only for a data or management 1803 * frame as specified in IEEE 802.11-2016 11.2.3.2 1804 */ 1805 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1806 !ieee80211_has_morefrags(hdr->frame_control) && 1807 !is_multicast_ether_addr(hdr->addr1) && 1808 (ieee80211_is_mgmt(hdr->frame_control) || 1809 ieee80211_is_data(hdr->frame_control)) && 1810 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1811 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1812 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { 1813 if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 1814 if (!ieee80211_has_pm(hdr->frame_control)) 1815 sta_ps_end(sta); 1816 } else { 1817 if (ieee80211_has_pm(hdr->frame_control)) 1818 sta_ps_start(sta); 1819 } 1820 } 1821 1822 /* mesh power save support */ 1823 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 1824 ieee80211_mps_rx_h_sta_process(sta, hdr); 1825 1826 /* 1827 * Drop (qos-)data::nullfunc frames silently, since they 1828 * are used only to control station power saving mode. 1829 */ 1830 if (ieee80211_is_any_nullfunc(hdr->frame_control)) { 1831 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1832 1833 /* 1834 * If we receive a 4-addr nullfunc frame from a STA 1835 * that was not moved to a 4-addr STA vlan yet send 1836 * the event to userspace and for older hostapd drop 1837 * the frame to the monitor interface. 1838 */ 1839 if (ieee80211_has_a4(hdr->frame_control) && 1840 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1841 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1842 !rx->sdata->u.vlan.sta))) { 1843 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) 1844 cfg80211_rx_unexpected_4addr_frame( 1845 rx->sdata->dev, sta->sta.addr, 1846 GFP_ATOMIC); 1847 return RX_DROP_MONITOR; 1848 } 1849 /* 1850 * Update counter and free packet here to avoid 1851 * counting this as a dropped packed. 
1852 */ 1853 sta->rx_stats.packets++; 1854 dev_kfree_skb(rx->skb); 1855 return RX_QUEUED; 1856 } 1857 1858 return RX_CONTINUE; 1859 } /* ieee80211_rx_h_sta_process */ 1860 1861 static struct ieee80211_key * 1862 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) 1863 { 1864 struct ieee80211_key *key = NULL; 1865 struct ieee80211_sub_if_data *sdata = rx->sdata; 1866 int idx2; 1867 1868 /* Make sure key gets set if either BIGTK key index is set so that 1869 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected 1870 * Beacon frames and Beacon frames that claim to use another BIGTK key 1871 * index (i.e., a key that we do not have). 1872 */ 1873 1874 if (idx < 0) { 1875 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; 1876 idx2 = idx + 1; 1877 } else { 1878 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1879 idx2 = idx + 1; 1880 else 1881 idx2 = idx - 1; 1882 } 1883 1884 if (rx->sta) 1885 key = rcu_dereference(rx->sta->gtk[idx]); 1886 if (!key) 1887 key = rcu_dereference(sdata->keys[idx]); 1888 if (!key && rx->sta) 1889 key = rcu_dereference(rx->sta->gtk[idx2]); 1890 if (!key) 1891 key = rcu_dereference(sdata->keys[idx2]); 1892 1893 return key; 1894 } 1895 1896 static ieee80211_rx_result debug_noinline 1897 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 1898 { 1899 struct sk_buff *skb = rx->skb; 1900 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1901 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1902 int keyidx; 1903 ieee80211_rx_result result = RX_DROP_UNUSABLE; 1904 struct ieee80211_key *sta_ptk = NULL; 1905 struct ieee80211_key *ptk_idx = NULL; 1906 int mmie_keyidx = -1; 1907 __le16 fc; 1908 const struct ieee80211_cipher_scheme *cs = NULL; 1909 1910 if (ieee80211_is_ext(hdr->frame_control)) 1911 return RX_CONTINUE; 1912 1913 /* 1914 * Key selection 101 1915 * 1916 * There are five types of keys: 1917 * - GTK (group keys) 1918 * - IGTK (group keys for management frames) 1919 * - BIGTK (group keys for Beacon frames) 1920 * - PTK (pairwise keys) 1921 * - STK (station-to-station pairwise keys) 1922 * 1923 * When selecting a key, we have to distinguish between multicast 1924 * (including broadcast) and unicast frames, the latter can only 1925 * use PTKs and STKs while the former always use GTKs, IGTKs, and 1926 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, 1927 * then unicast frames can also use key indices like GTKs. Hence, if we 1928 * don't have a PTK/STK we check the key index for a WEP key. 1929 * 1930 * Note that in a regular BSS, multicast frames are sent by the 1931 * AP only, associated stations unicast the frame to the AP first 1932 * which then multicasts it on their behalf. 1933 * 1934 * There is also a slight problem in IBSS mode: GTKs are negotiated 1935 * with each station, that is something we don't currently handle. 1936 * The spec seems to expect that one negotiates the same key with 1937 * every station but there's no such requirement; VLANs could be 1938 * possible. 
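 *
 * As a rough summary of the selection order implemented below (pseudo-code
 * sketch only; the real code additionally handles hardware-decrypted
 * frames, BIGTK index validation and the "no key" bookkeeping):
 *
 *	if (unicast frame && PTK/STK available)
 *		use the pairwise key;
 *	else if (robust management frame carrying an MMIE)
 *		use the IGTK/BIGTK selected by the MMIE key index;
 *	else if (frame has the Protected bit set)
 *		use the per-station GTK or the interface key for the key
 *		index found in the IV (this is also how WEP-by-index works
 *		for unicast frames);
 *	else
 *		leave the frame alone, but remember any configured key so
 *		unprotected frames can still be dropped later if protection
 *		was expected.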
1939 */ 1940 1941 /* start without a key */ 1942 rx->key = NULL; 1943 fc = hdr->frame_control; 1944 1945 if (rx->sta) { 1946 int keyid = rx->sta->ptk_idx; 1947 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 1948 1949 if (ieee80211_has_protected(fc)) { 1950 cs = rx->sta->cipher_scheme; 1951 keyid = ieee80211_get_keyid(rx->skb, cs); 1952 1953 if (unlikely(keyid < 0)) 1954 return RX_DROP_UNUSABLE; 1955 1956 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); 1957 } 1958 } 1959 1960 if (!ieee80211_has_protected(fc)) 1961 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 1962 1963 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 1964 rx->key = ptk_idx ? ptk_idx : sta_ptk; 1965 if ((status->flag & RX_FLAG_DECRYPTED) && 1966 (status->flag & RX_FLAG_IV_STRIPPED)) 1967 return RX_CONTINUE; 1968 /* Skip decryption if the frame is not protected. */ 1969 if (!ieee80211_has_protected(fc)) 1970 return RX_CONTINUE; 1971 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { 1972 /* Broadcast/multicast robust management frame / BIP */ 1973 if ((status->flag & RX_FLAG_DECRYPTED) && 1974 (status->flag & RX_FLAG_IV_STRIPPED)) 1975 return RX_CONTINUE; 1976 1977 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || 1978 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + 1979 NUM_DEFAULT_BEACON_KEYS) { 1980 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 1981 skb->data, 1982 skb->len); 1983 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1984 } 1985 1986 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); 1987 if (!rx->key) 1988 return RX_CONTINUE; /* Beacon protection not in use */ 1989 } else if (mmie_keyidx >= 0) { 1990 /* Broadcast/multicast robust management frame / BIP */ 1991 if ((status->flag & RX_FLAG_DECRYPTED) && 1992 (status->flag & RX_FLAG_IV_STRIPPED)) 1993 return RX_CONTINUE; 1994 1995 if (mmie_keyidx < NUM_DEFAULT_KEYS || 1996 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1997 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 1998 if (rx->sta) { 1999 if (ieee80211_is_group_privacy_action(skb) && 2000 test_sta_flag(rx->sta, WLAN_STA_MFP)) 2001 return RX_DROP_MONITOR; 2002 2003 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 2004 } 2005 if (!rx->key) 2006 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 2007 } else if (!ieee80211_has_protected(fc)) { 2008 /* 2009 * The frame was not protected, so skip decryption. However, we 2010 * need to set rx->key if there is a key that could have been 2011 * used so that the frame may be dropped if encryption would 2012 * have been expected. 2013 */ 2014 struct ieee80211_key *key = NULL; 2015 struct ieee80211_sub_if_data *sdata = rx->sdata; 2016 int i; 2017 2018 if (ieee80211_is_beacon(fc)) { 2019 key = ieee80211_rx_get_bigtk(rx, -1); 2020 } else if (ieee80211_is_mgmt(fc) && 2021 is_multicast_ether_addr(hdr->addr1)) { 2022 key = rcu_dereference(rx->sdata->default_mgmt_key); 2023 } else { 2024 if (rx->sta) { 2025 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2026 key = rcu_dereference(rx->sta->gtk[i]); 2027 if (key) 2028 break; 2029 } 2030 } 2031 if (!key) { 2032 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2033 key = rcu_dereference(sdata->keys[i]); 2034 if (key) 2035 break; 2036 } 2037 } 2038 } 2039 if (key) 2040 rx->key = key; 2041 return RX_CONTINUE; 2042 } else { 2043 /* 2044 * The device doesn't give us the IV so we won't be 2045 * able to look up the key. That's ok though, we 2046 * don't need to decrypt the frame, we just won't 2047 * be able to keep statistics accurate. 
2048 * Except for key threshold notifications, should 2049 * we somehow allow the driver to tell us which key 2050 * the hardware used if this flag is set? 2051 */ 2052 if ((status->flag & RX_FLAG_DECRYPTED) && 2053 (status->flag & RX_FLAG_IV_STRIPPED)) 2054 return RX_CONTINUE; 2055 2056 keyidx = ieee80211_get_keyid(rx->skb, cs); 2057 2058 if (unlikely(keyidx < 0)) 2059 return RX_DROP_UNUSABLE; 2060 2061 /* check per-station GTK first, if multicast packet */ 2062 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 2063 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 2064 2065 /* if not found, try default key */ 2066 if (!rx->key) { 2067 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2068 2069 /* 2070 * RSNA-protected unicast frames should always be 2071 * sent with pairwise or station-to-station keys, 2072 * but for WEP we allow using a key index as well. 2073 */ 2074 if (rx->key && 2075 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2076 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2077 !is_multicast_ether_addr(hdr->addr1)) 2078 rx->key = NULL; 2079 } 2080 } 2081 2082 if (rx->key) { 2083 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2084 return RX_DROP_MONITOR; 2085 2086 /* TODO: add threshold stuff again */ 2087 } else { 2088 return RX_DROP_MONITOR; 2089 } 2090 2091 switch (rx->key->conf.cipher) { 2092 case WLAN_CIPHER_SUITE_WEP40: 2093 case WLAN_CIPHER_SUITE_WEP104: 2094 result = ieee80211_crypto_wep_decrypt(rx); 2095 break; 2096 case WLAN_CIPHER_SUITE_TKIP: 2097 result = ieee80211_crypto_tkip_decrypt(rx); 2098 break; 2099 case WLAN_CIPHER_SUITE_CCMP: 2100 result = ieee80211_crypto_ccmp_decrypt( 2101 rx, IEEE80211_CCMP_MIC_LEN); 2102 break; 2103 case WLAN_CIPHER_SUITE_CCMP_256: 2104 result = ieee80211_crypto_ccmp_decrypt( 2105 rx, IEEE80211_CCMP_256_MIC_LEN); 2106 break; 2107 case WLAN_CIPHER_SUITE_AES_CMAC: 2108 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2109 break; 2110 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2111 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2112 break; 2113 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2114 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2115 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2116 break; 2117 case WLAN_CIPHER_SUITE_GCMP: 2118 case WLAN_CIPHER_SUITE_GCMP_256: 2119 result = ieee80211_crypto_gcmp_decrypt(rx); 2120 break; 2121 default: 2122 result = ieee80211_crypto_hw_decrypt(rx); 2123 } 2124 2125 /* the hdr variable is invalid after the decrypt handlers */ 2126 2127 /* either the frame has been decrypted or will be dropped */ 2128 status->flag |= RX_FLAG_DECRYPTED; 2129 2130 if (unlikely(ieee80211_is_beacon(fc) && result == RX_DROP_UNUSABLE)) 2131 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2132 skb->data, skb->len); 2133 2134 return result; 2135 } 2136 2137 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) 2138 { 2139 int i; 2140 2141 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2142 skb_queue_head_init(&cache->entries[i].skb_list); 2143 } 2144 2145 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) 2146 { 2147 int i; 2148 2149 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2150 __skb_queue_purge(&cache->entries[i].skb_list); 2151 } 2152 2153 static inline struct ieee80211_fragment_entry * 2154 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, 2155 unsigned int frag, unsigned int seq, int rx_queue, 2156 struct sk_buff **skb) 2157 { 2158 struct ieee80211_fragment_entry *entry; 2159 2160 entry = &cache->entries[cache->next++]; 2161 if (cache->next >= 
IEEE80211_FRAGMENT_MAX) 2162 cache->next = 0; 2163 2164 __skb_queue_purge(&entry->skb_list); 2165 2166 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2167 *skb = NULL; 2168 entry->first_frag_time = jiffies; 2169 entry->seq = seq; 2170 entry->rx_queue = rx_queue; 2171 entry->last_frag = frag; 2172 entry->check_sequential_pn = false; 2173 entry->extra_len = 0; 2174 2175 return entry; 2176 } 2177 2178 static inline struct ieee80211_fragment_entry * 2179 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, 2180 unsigned int frag, unsigned int seq, 2181 int rx_queue, struct ieee80211_hdr *hdr) 2182 { 2183 struct ieee80211_fragment_entry *entry; 2184 int i, idx; 2185 2186 idx = cache->next; 2187 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2188 struct ieee80211_hdr *f_hdr; 2189 struct sk_buff *f_skb; 2190 2191 idx--; 2192 if (idx < 0) 2193 idx = IEEE80211_FRAGMENT_MAX - 1; 2194 2195 entry = &cache->entries[idx]; 2196 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2197 entry->rx_queue != rx_queue || 2198 entry->last_frag + 1 != frag) 2199 continue; 2200 2201 f_skb = __skb_peek(&entry->skb_list); 2202 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2203 2204 /* 2205 * Check ftype and addresses are equal, else check next fragment 2206 */ 2207 if (((hdr->frame_control ^ f_hdr->frame_control) & 2208 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2209 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2210 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2211 continue; 2212 2213 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2214 __skb_queue_purge(&entry->skb_list); 2215 continue; 2216 } 2217 return entry; 2218 } 2219 2220 return NULL; 2221 } 2222 2223 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) 2224 { 2225 return rx->key && 2226 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2227 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2228 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2229 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2230 ieee80211_has_protected(fc); 2231 } 2232 2233 static ieee80211_rx_result debug_noinline 2234 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2235 { 2236 struct ieee80211_fragment_cache *cache = &rx->sdata->frags; 2237 struct ieee80211_hdr *hdr; 2238 u16 sc; 2239 __le16 fc; 2240 unsigned int frag, seq; 2241 struct ieee80211_fragment_entry *entry; 2242 struct sk_buff *skb; 2243 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2244 2245 hdr = (struct ieee80211_hdr *)rx->skb->data; 2246 fc = hdr->frame_control; 2247 2248 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) 2249 return RX_CONTINUE; 2250 2251 sc = le16_to_cpu(hdr->seq_ctrl); 2252 frag = sc & IEEE80211_SCTL_FRAG; 2253 2254 if (rx->sta) 2255 cache = &rx->sta->frags; 2256 2257 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2258 goto out; 2259 2260 if (is_multicast_ether_addr(hdr->addr1)) 2261 return RX_DROP_MONITOR; 2262 2263 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2264 2265 if (skb_linearize(rx->skb)) 2266 return RX_DROP_UNUSABLE; 2267 2268 /* 2269 * skb_linearize() might change the skb->data and 2270 * previously cached variables (in this case, hdr) need to 2271 * be refreshed with the new data. 2272 */ 2273 hdr = (struct ieee80211_hdr *)rx->skb->data; 2274 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2275 2276 if (frag == 0) { 2277 /* This is the first fragment of a new frame. 
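	 *
	 * The overall reassembly flow, as a sketch:
	 *
	 *	f0 (frag == 0, more frags)    -> ieee80211_reassemble_add(),
	 *	                                 stash the skb, RX_QUEUED
	 *	f1..fN-1 (more frags set)     -> ieee80211_reassemble_find(),
	 *	                                 append, RX_QUEUED
	 *	fN (More Fragments bit clear) -> append, then concatenate
	 *	                                 entry->skb_list into one skb,
	 *	                                 RX_CONTINUE
	 *
	 * so only the final fragment lets the rebuilt frame continue down
	 * the RX path.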
*/ 2278 entry = ieee80211_reassemble_add(cache, frag, seq, 2279 rx->seqno_idx, &(rx->skb)); 2280 if (requires_sequential_pn(rx, fc)) { 2281 int queue = rx->security_idx; 2282 2283 /* Store CCMP/GCMP PN so that we can verify that the 2284 * next fragment has a sequential PN value. 2285 */ 2286 entry->check_sequential_pn = true; 2287 entry->is_protected = true; 2288 entry->key_color = rx->key->color; 2289 memcpy(entry->last_pn, 2290 rx->key->u.ccmp.rx_pn[queue], 2291 IEEE80211_CCMP_PN_LEN); 2292 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2293 u.ccmp.rx_pn) != 2294 offsetof(struct ieee80211_key, 2295 u.gcmp.rx_pn)); 2296 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2297 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2298 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2299 IEEE80211_GCMP_PN_LEN); 2300 } else if (rx->key && 2301 (ieee80211_has_protected(fc) || 2302 (status->flag & RX_FLAG_DECRYPTED))) { 2303 entry->is_protected = true; 2304 entry->key_color = rx->key->color; 2305 } 2306 return RX_QUEUED; 2307 } 2308 2309 /* This is a fragment for a frame that should already be pending in 2310 * fragment cache. Add this fragment to the end of the pending entry. 2311 */ 2312 entry = ieee80211_reassemble_find(cache, frag, seq, 2313 rx->seqno_idx, hdr); 2314 if (!entry) { 2315 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2316 return RX_DROP_MONITOR; 2317 } 2318 2319 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2320 * MPDU PN values are not incrementing in steps of 1." 2321 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2322 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2323 */ 2324 if (entry->check_sequential_pn) { 2325 int i; 2326 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2327 2328 if (!requires_sequential_pn(rx, fc)) 2329 return RX_DROP_UNUSABLE; 2330 2331 /* Prevent mixed key and fragment cache attacks */ 2332 if (entry->key_color != rx->key->color) 2333 return RX_DROP_UNUSABLE; 2334 2335 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2336 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2337 pn[i]++; 2338 if (pn[i]) 2339 break; 2340 } 2341 2342 rpn = rx->ccm_gcm.pn; 2343 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2344 return RX_DROP_UNUSABLE; 2345 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2346 } else if (entry->is_protected && 2347 (!rx->key || 2348 (!ieee80211_has_protected(fc) && 2349 !(status->flag & RX_FLAG_DECRYPTED)) || 2350 rx->key->color != entry->key_color)) { 2351 /* Drop this as a mixed key or fragment cache attack, even 2352 * if for TKIP Michael MIC should protect us, and WEP is a 2353 * lost cause anyway. 
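		 *
		 * The key 'color' acts as an identifier of the key instance
		 * (it is assigned when the key is installed), so the
		 * comparison below reduces to something like this sketch:
		 *
		 *	static bool frag_same_key(const struct ieee80211_fragment_entry *e,
		 *				  const struct ieee80211_key *key)
		 *	{
		 *		return key && e->key_color == key->color;
		 *	}
		 *
		 * i.e. all fragments of one MSDU must have been protected by
		 * the very same key instance, which (together with the
		 * sequential-PN check above) closes the mixed-key and
		 * cache-poisoning reassembly attacks.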
 */
		return RX_DROP_UNUSABLE;
	} else if (entry->is_protected && rx->key &&
		   entry->key_color != rx->key->color &&
		   (status->flag & RX_FLAG_DECRYPTED)) {
		return RX_DROP_UNUSABLE;
	}

	skb_pull(rx->skb, ieee80211_hdrlen(fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (ieee80211_has_morefrags(fc)) {
		rx->skb = NULL;
		return RX_QUEUED;
	}

	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		skb_put_data(rx->skb, skb->data, skb->len);
		dev_kfree_skb(skb);
	}

 out:
	ieee80211_led_rx(rx->local);
	if (rx->sta)
		rx->sta->rx_stats.packets++;
	return RX_CONTINUE;
}

static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
	if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
		return -EACCES;

	return 0;
}

static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
	 */
	if (status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* check mesh EAPOL frames first */
	if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
		     ieee80211_is_data(fc))) {
		struct ieee80211s_hdr *mesh_hdr;
		u16 hdr_len = ieee80211_hdrlen(fc);
		u16 ethertype_offset;
		__be16 ethertype;

		if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
			goto drop_check;

		/* make sure fixed part of mesh header is there, also checks skb len */
		if (!pskb_may_pull(rx->skb, hdr_len + 6))
			goto drop_check;

		mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
		ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
				   sizeof(rfc1042_header);

		if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
		    ethertype == rx->sdata->control_port_protocol)
			return 0;
	}

 drop_check:
	/* Drop unencrypted frames if key is set. */
	if (unlikely(!ieee80211_has_protected(fc) &&
		     !ieee80211_is_any_nullfunc(fc) &&
		     ieee80211_is_data(fc) && rx->key))
		return -EACCES;

	return 0;
}

static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	__le16 fc = hdr->frame_control;

	/*
	 * Pass through unencrypted frames if the hardware has
	 * decrypted them already.
2457 */ 2458 if (status->flag & RX_FLAG_DECRYPTED) 2459 return 0; 2460 2461 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2462 if (unlikely(!ieee80211_has_protected(fc) && 2463 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 2464 rx->key)) { 2465 if (ieee80211_is_deauth(fc) || 2466 ieee80211_is_disassoc(fc)) 2467 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2468 rx->skb->data, 2469 rx->skb->len); 2470 return -EACCES; 2471 } 2472 /* BIP does not use Protected field, so need to check MMIE */ 2473 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2474 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2475 if (ieee80211_is_deauth(fc) || 2476 ieee80211_is_disassoc(fc)) 2477 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2478 rx->skb->data, 2479 rx->skb->len); 2480 return -EACCES; 2481 } 2482 if (unlikely(ieee80211_is_beacon(fc) && rx->key && 2483 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2484 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2485 rx->skb->data, 2486 rx->skb->len); 2487 return -EACCES; 2488 } 2489 /* 2490 * When using MFP, Action frames are not allowed prior to 2491 * having configured keys. 2492 */ 2493 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2494 ieee80211_is_robust_mgmt_frame(rx->skb))) 2495 return -EACCES; 2496 } 2497 2498 return 0; 2499 } 2500 2501 static int 2502 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2503 { 2504 struct ieee80211_sub_if_data *sdata = rx->sdata; 2505 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2506 bool check_port_control = false; 2507 struct ethhdr *ehdr; 2508 int ret; 2509 2510 *port_control = false; 2511 if (ieee80211_has_a4(hdr->frame_control) && 2512 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2513 return -1; 2514 2515 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2516 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2517 2518 if (!sdata->u.mgd.use_4addr) 2519 return -1; 2520 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2521 check_port_control = true; 2522 } 2523 2524 if (is_multicast_ether_addr(hdr->addr1) && 2525 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2526 return -1; 2527 2528 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2529 if (ret < 0) 2530 return ret; 2531 2532 ehdr = (struct ethhdr *) rx->skb->data; 2533 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2534 *port_control = true; 2535 else if (check_port_control) 2536 return -1; 2537 2538 return 0; 2539 } 2540 2541 /* 2542 * requires that rx->skb is a frame with ethernet header 2543 */ 2544 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2545 { 2546 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2547 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2548 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2549 2550 /* 2551 * Allow EAPOL frames to us/the PAE group address regardless of 2552 * whether the frame was encrypted or not, and always disallow 2553 * all other destination addresses for them. 
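	 *
	 * For reference, control_port_protocol is normally the EAPOL
	 * ethertype (ETH_P_PAE, 0x888E) and the PAE group address is
	 * 01:80:C2:00:00:03, so a stripped-down standalone version of this
	 * test could look like (sketch only; the helper name is
	 * illustrative):
	 *
	 *	static bool eapol_dest_allowed(const struct ethhdr *eh,
	 *				       const u8 *own_addr)
	 *	{
	 *		static const u8 pae[ETH_ALEN] __aligned(2) =
	 *			{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x03 };
	 *
	 *		return eh->h_proto == htons(ETH_P_PAE) &&
	 *		       (ether_addr_equal(eh->h_dest, own_addr) ||
	 *			ether_addr_equal(eh->h_dest, pae));
	 *	}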
2554 */ 2555 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) 2556 return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2557 ether_addr_equal(ehdr->h_dest, pae_group_addr); 2558 2559 if (ieee80211_802_1x_port_control(rx) || 2560 ieee80211_drop_unencrypted(rx, fc)) 2561 return false; 2562 2563 return true; 2564 } 2565 2566 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2567 struct ieee80211_rx_data *rx) 2568 { 2569 struct ieee80211_sub_if_data *sdata = rx->sdata; 2570 struct net_device *dev = sdata->dev; 2571 2572 if (unlikely((skb->protocol == sdata->control_port_protocol || 2573 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && 2574 !sdata->control_port_no_preauth)) && 2575 sdata->control_port_over_nl80211)) { 2576 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2577 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2578 2579 cfg80211_rx_control_port(dev, skb, noencrypt); 2580 dev_kfree_skb(skb); 2581 } else { 2582 struct ethhdr *ehdr = (void *)skb_mac_header(skb); 2583 2584 memset(skb->cb, 0, sizeof(skb->cb)); 2585 2586 /* 2587 * 802.1X over 802.11 requires that the authenticator address 2588 * be used for EAPOL frames. However, 802.1X allows the use of 2589 * the PAE group address instead. If the interface is part of 2590 * a bridge and we pass the frame with the PAE group address, 2591 * then the bridge will forward it to the network (even if the 2592 * client was not associated yet), which isn't supposed to 2593 * happen. 2594 * To avoid that, rewrite the destination address to our own 2595 * address, so that the authenticator (e.g. hostapd) will see 2596 * the frame, but bridge won't forward it anywhere else. Note 2597 * that due to earlier filtering, the only other address can 2598 * be the PAE group address. 2599 */ 2600 if (unlikely(skb->protocol == sdata->control_port_protocol && 2601 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) 2602 ether_addr_copy(ehdr->h_dest, sdata->vif.addr); 2603 2604 /* deliver to local stack */ 2605 if (rx->list) 2606 list_add_tail(&skb->list, rx->list); 2607 else 2608 netif_receive_skb(skb); 2609 } 2610 } 2611 2612 /* 2613 * requires that rx->skb is a frame with ethernet header 2614 */ 2615 static void 2616 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2617 { 2618 struct ieee80211_sub_if_data *sdata = rx->sdata; 2619 struct net_device *dev = sdata->dev; 2620 struct sk_buff *skb, *xmit_skb; 2621 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2622 struct sta_info *dsta; 2623 2624 skb = rx->skb; 2625 xmit_skb = NULL; 2626 2627 dev_sw_netstats_rx_add(dev, skb->len); 2628 2629 if (rx->sta) { 2630 /* The seqno index has the same property as needed 2631 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2632 * for non-QoS-data frames. Here we know it's a data 2633 * frame, so count MSDUs. 
2634 */ 2635 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 2636 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2637 u64_stats_update_end(&rx->sta->rx_stats.syncp); 2638 } 2639 2640 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2641 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2642 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2643 ehdr->h_proto != rx->sdata->control_port_protocol && 2644 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2645 if (is_multicast_ether_addr(ehdr->h_dest) && 2646 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2647 /* 2648 * send multicast frames both to higher layers in 2649 * local net stack and back to the wireless medium 2650 */ 2651 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2652 if (!xmit_skb) 2653 net_info_ratelimited("%s: failed to clone multicast frame\n", 2654 dev->name); 2655 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2656 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2657 dsta = sta_info_get(sdata, ehdr->h_dest); 2658 if (dsta) { 2659 /* 2660 * The destination station is associated to 2661 * this AP (in this VLAN), so send the frame 2662 * directly to it and do not pass it to local 2663 * net stack. 2664 */ 2665 xmit_skb = skb; 2666 skb = NULL; 2667 } 2668 } 2669 } 2670 2671 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2672 if (skb) { 2673 /* 'align' will only take the values 0 or 2 here since all 2674 * frames are required to be aligned to 2-byte boundaries 2675 * when being passed to mac80211; the code here works just 2676 * as well if that isn't true, but mac80211 assumes it can 2677 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2678 */ 2679 int align; 2680 2681 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2682 if (align) { 2683 if (WARN_ON(skb_headroom(skb) < 3)) { 2684 dev_kfree_skb(skb); 2685 skb = NULL; 2686 } else { 2687 u8 *data = skb->data; 2688 size_t len = skb_headlen(skb); 2689 skb->data -= align; 2690 memmove(skb->data, data, len); 2691 skb_set_tail_pointer(skb, len); 2692 } 2693 } 2694 } 2695 #endif 2696 2697 if (skb) { 2698 skb->protocol = eth_type_trans(skb, dev); 2699 ieee80211_deliver_skb_to_local_stack(skb, rx); 2700 } 2701 2702 if (xmit_skb) { 2703 /* 2704 * Send to wireless media and increase priority by 256 to 2705 * keep the received priority instead of reclassifying 2706 * the frame (see cfg80211_classify8021d). 
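		 *
		 * cfg80211_classify8021d() treats skb->priority values
		 * 256..263 as a directly supplied 802.1d user priority, so
		 * adding 256 here makes the TX path reuse the UP this frame
		 * arrived with instead of re-deriving it from DSCP. The
		 * consumer side is roughly (sketch; dscp_to_up() stands in
		 * for the normal DSCP classification):
		 *
		 *	if (skb->priority >= 256 && skb->priority <= 263)
		 *		up = skb->priority - 256;
		 *	else
		 *		up = dscp_to_up(skb);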
 */
		xmit_skb->priority += 256;
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		dev_queue_xmit(xmit_skb);
	}
}

static ieee80211_rx_result debug_noinline
__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
{
	struct net_device *dev = rx->sdata->dev;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	struct sk_buff_head frame_list;
	struct ethhdr ethhdr;
	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;

	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
		check_da = NULL;
		check_sa = NULL;
	} else switch (rx->sdata->vif.type) {
		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_AP_VLAN:
			check_da = NULL;
			break;
		case NL80211_IFTYPE_STATION:
			if (!rx->sta ||
			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
				check_sa = NULL;
			break;
		case NL80211_IFTYPE_MESH_POINT:
			check_sa = NULL;
			break;
		default:
			break;
	}

	skb->dev = dev;
	__skb_queue_head_init(&frame_list);

	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
					  rx->sdata->vif.addr,
					  rx->sdata->vif.type,
					  data_offset, true))
		return RX_DROP_UNUSABLE;

	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
				 rx->sdata->vif.type,
				 rx->local->hw.extra_tx_headroom,
				 check_da, check_sa);

	while (!skb_queue_empty(&frame_list)) {
		rx->skb = __skb_dequeue(&frame_list);

		if (!ieee80211_frame_allowed(rx, fc)) {
			dev_kfree_skb(rx->skb);
			continue;
		}

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;

	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data(fc)))
		return RX_CONTINUE;

	if (unlikely(!ieee80211_is_data_present(fc)))
		return RX_DROP_MONITOR;

	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
		switch (rx->sdata->vif.type) {
		case NL80211_IFTYPE_AP_VLAN:
			if (!rx->sdata->u.vlan.sta)
				return RX_DROP_UNUSABLE;
			break;
		case NL80211_IFTYPE_STATION:
			if (!rx->sdata->u.mgd.use_4addr)
				return RX_DROP_UNUSABLE;
			break;
		default:
			return RX_DROP_UNUSABLE;
		}
	}

	if (is_multicast_ether_addr(hdr->addr1))
		return RX_DROP_UNUSABLE;

	if (rx->key) {
		/*
		 * We should not receive A-MSDUs on pre-HT connections,
		 * and HT connections cannot use old ciphers. Thus drop
		 * them, as in those cases we couldn't even have SPP
		 * A-MSDUs or such.
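		 *
		 * For background, each subframe that ieee80211_amsdu_to_8023s()
		 * unpacks (in __ieee80211_rx_h_amsdu() above) starts with a
		 * 14-byte 802.3-style header followed by the payload and
		 * padding to a 4-byte boundary, roughly (the struct name is
		 * illustrative, not an existing kernel type):
		 *
		 *	struct amsdu_subframe_hdr {
		 *		u8     da[ETH_ALEN];
		 *		u8     sa[ETH_ALEN];
		 *		__be16 len;	big-endian payload length,
		 *				not an ethertype
		 *	} __packed;
		 *
		 * which is also why the check_da/check_sa filters above can
		 * compare the inner addresses against what the interface
		 * mode allows.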
2816 */ 2817 switch (rx->key->conf.cipher) { 2818 case WLAN_CIPHER_SUITE_WEP40: 2819 case WLAN_CIPHER_SUITE_WEP104: 2820 case WLAN_CIPHER_SUITE_TKIP: 2821 return RX_DROP_UNUSABLE; 2822 default: 2823 break; 2824 } 2825 } 2826 2827 return __ieee80211_rx_h_amsdu(rx, 0); 2828 } 2829 2830 #ifdef CONFIG_MAC80211_MESH 2831 static ieee80211_rx_result 2832 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2833 { 2834 struct ieee80211_hdr *fwd_hdr, *hdr; 2835 struct ieee80211_tx_info *info; 2836 struct ieee80211s_hdr *mesh_hdr; 2837 struct sk_buff *skb = rx->skb, *fwd_skb; 2838 struct ieee80211_local *local = rx->local; 2839 struct ieee80211_sub_if_data *sdata = rx->sdata; 2840 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2841 u16 ac, q, hdrlen; 2842 int tailroom = 0; 2843 2844 hdr = (struct ieee80211_hdr *) skb->data; 2845 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2846 2847 /* make sure fixed part of mesh header is there, also checks skb len */ 2848 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2849 return RX_DROP_MONITOR; 2850 2851 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2852 2853 /* make sure full mesh header is there, also checks skb len */ 2854 if (!pskb_may_pull(rx->skb, 2855 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2856 return RX_DROP_MONITOR; 2857 2858 /* reload pointers */ 2859 hdr = (struct ieee80211_hdr *) skb->data; 2860 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2861 2862 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2863 return RX_DROP_MONITOR; 2864 2865 /* frame is in RMC, don't forward */ 2866 if (ieee80211_is_data(hdr->frame_control) && 2867 is_multicast_ether_addr(hdr->addr1) && 2868 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2869 return RX_DROP_MONITOR; 2870 2871 if (!ieee80211_is_data(hdr->frame_control)) 2872 return RX_CONTINUE; 2873 2874 if (!mesh_hdr->ttl) 2875 return RX_DROP_MONITOR; 2876 2877 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2878 struct mesh_path *mppath; 2879 char *proxied_addr; 2880 char *mpp_addr; 2881 2882 if (is_multicast_ether_addr(hdr->addr1)) { 2883 mpp_addr = hdr->addr3; 2884 proxied_addr = mesh_hdr->eaddr1; 2885 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == 2886 MESH_FLAGS_AE_A5_A6) { 2887 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2888 mpp_addr = hdr->addr4; 2889 proxied_addr = mesh_hdr->eaddr2; 2890 } else { 2891 return RX_DROP_MONITOR; 2892 } 2893 2894 rcu_read_lock(); 2895 mppath = mpp_path_lookup(sdata, proxied_addr); 2896 if (!mppath) { 2897 mpp_path_add(sdata, proxied_addr, mpp_addr); 2898 } else { 2899 spin_lock_bh(&mppath->state_lock); 2900 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2901 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2902 mppath->exp_time = jiffies; 2903 spin_unlock_bh(&mppath->state_lock); 2904 } 2905 rcu_read_unlock(); 2906 } 2907 2908 /* Frame has reached destination. 
Don't forward */ 2909 if (!is_multicast_ether_addr(hdr->addr1) && 2910 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2911 return RX_CONTINUE; 2912 2913 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2914 q = sdata->vif.hw_queue[ac]; 2915 if (ieee80211_queue_stopped(&local->hw, q)) { 2916 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2917 return RX_DROP_MONITOR; 2918 } 2919 skb_set_queue_mapping(skb, q); 2920 2921 if (!--mesh_hdr->ttl) { 2922 if (!is_multicast_ether_addr(hdr->addr1)) 2923 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, 2924 dropped_frames_ttl); 2925 goto out; 2926 } 2927 2928 if (!ifmsh->mshcfg.dot11MeshForwarding) 2929 goto out; 2930 2931 if (sdata->crypto_tx_tailroom_needed_cnt) 2932 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2933 2934 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2935 sdata->encrypt_headroom, 2936 tailroom, GFP_ATOMIC); 2937 if (!fwd_skb) 2938 goto out; 2939 2940 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2941 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2942 info = IEEE80211_SKB_CB(fwd_skb); 2943 memset(info, 0, sizeof(*info)); 2944 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 2945 info->control.vif = &rx->sdata->vif; 2946 info->control.jiffies = jiffies; 2947 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2948 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2949 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2950 /* update power mode indication when forwarding */ 2951 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2952 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2953 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2954 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2955 } else { 2956 /* unable to resolve next hop */ 2957 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2958 fwd_hdr->addr3, 0, 2959 WLAN_REASON_MESH_PATH_NOFORWARD, 2960 fwd_hdr->addr2); 2961 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2962 kfree_skb(fwd_skb); 2963 return RX_DROP_MONITOR; 2964 } 2965 2966 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2967 ieee80211_add_pending_skb(local, fwd_skb); 2968 out: 2969 if (is_multicast_ether_addr(hdr->addr1)) 2970 return RX_CONTINUE; 2971 return RX_DROP_MONITOR; 2972 } 2973 #endif 2974 2975 static ieee80211_rx_result debug_noinline 2976 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2977 { 2978 struct ieee80211_sub_if_data *sdata = rx->sdata; 2979 struct ieee80211_local *local = rx->local; 2980 struct net_device *dev = sdata->dev; 2981 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2982 __le16 fc = hdr->frame_control; 2983 bool port_control; 2984 int err; 2985 2986 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2987 return RX_CONTINUE; 2988 2989 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2990 return RX_DROP_MONITOR; 2991 2992 /* 2993 * Send unexpected-4addr-frame event to hostapd. For older versions, 2994 * also drop the frame to cooked monitor interfaces. 
2995 */ 2996 if (ieee80211_has_a4(hdr->frame_control) && 2997 sdata->vif.type == NL80211_IFTYPE_AP) { 2998 if (rx->sta && 2999 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 3000 cfg80211_rx_unexpected_4addr_frame( 3001 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 3002 return RX_DROP_MONITOR; 3003 } 3004 3005 err = __ieee80211_data_to_8023(rx, &port_control); 3006 if (unlikely(err)) 3007 return RX_DROP_UNUSABLE; 3008 3009 if (!ieee80211_frame_allowed(rx, fc)) 3010 return RX_DROP_MONITOR; 3011 3012 /* directly handle TDLS channel switch requests/responses */ 3013 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 3014 cpu_to_be16(ETH_P_TDLS))) { 3015 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 3016 3017 if (pskb_may_pull(rx->skb, 3018 offsetof(struct ieee80211_tdls_data, u)) && 3019 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 3020 tf->category == WLAN_CATEGORY_TDLS && 3021 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 3022 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 3023 rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); 3024 __ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); 3025 return RX_QUEUED; 3026 } 3027 } 3028 3029 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 3030 unlikely(port_control) && sdata->bss) { 3031 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 3032 u.ap); 3033 dev = sdata->dev; 3034 rx->sdata = sdata; 3035 } 3036 3037 rx->skb->dev = dev; 3038 3039 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 3040 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 3041 !is_multicast_ether_addr( 3042 ((struct ethhdr *)rx->skb->data)->h_dest) && 3043 (!local->scanning && 3044 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 3045 mod_timer(&local->dynamic_ps_timer, jiffies + 3046 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 3047 3048 ieee80211_deliver_skb(rx); 3049 3050 return RX_QUEUED; 3051 } 3052 3053 static ieee80211_rx_result debug_noinline 3054 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 3055 { 3056 struct sk_buff *skb = rx->skb; 3057 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 3058 struct tid_ampdu_rx *tid_agg_rx; 3059 u16 start_seq_num; 3060 u16 tid; 3061 3062 if (likely(!ieee80211_is_ctl(bar->frame_control))) 3063 return RX_CONTINUE; 3064 3065 if (ieee80211_is_back_req(bar->frame_control)) { 3066 struct { 3067 __le16 control, start_seq_num; 3068 } __packed bar_data; 3069 struct ieee80211_event event = { 3070 .type = BAR_RX_EVENT, 3071 }; 3072 3073 if (!rx->sta) 3074 return RX_DROP_MONITOR; 3075 3076 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 3077 &bar_data, sizeof(bar_data))) 3078 return RX_DROP_MONITOR; 3079 3080 tid = le16_to_cpu(bar_data.control) >> 12; 3081 3082 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 3083 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 3084 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 3085 WLAN_BACK_RECIPIENT, 3086 WLAN_REASON_QSTA_REQUIRE_SETUP); 3087 3088 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 3089 if (!tid_agg_rx) 3090 return RX_DROP_MONITOR; 3091 3092 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 3093 event.u.ba.tid = tid; 3094 event.u.ba.ssn = start_seq_num; 3095 event.u.ba.sta = &rx->sta->sta; 3096 3097 /* reset session timer */ 3098 if (tid_agg_rx->timeout) 3099 mod_timer(&tid_agg_rx->session_timer, 3100 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 3101 3102 spin_lock(&tid_agg_rx->reorder_lock); 3103 
/* release stored frames up to start of BAR */ 3104 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 3105 start_seq_num, frames); 3106 spin_unlock(&tid_agg_rx->reorder_lock); 3107 3108 drv_event_callback(rx->local, rx->sdata, &event); 3109 3110 kfree_skb(skb); 3111 return RX_QUEUED; 3112 } 3113 3114 /* 3115 * After this point, we only want management frames, 3116 * so we can drop all remaining control frames to 3117 * cooked monitor interfaces. 3118 */ 3119 return RX_DROP_MONITOR; 3120 } 3121 3122 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 3123 struct ieee80211_mgmt *mgmt, 3124 size_t len) 3125 { 3126 struct ieee80211_local *local = sdata->local; 3127 struct sk_buff *skb; 3128 struct ieee80211_mgmt *resp; 3129 3130 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 3131 /* Not to own unicast address */ 3132 return; 3133 } 3134 3135 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 3136 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 3137 /* Not from the current AP or not associated yet. */ 3138 return; 3139 } 3140 3141 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 3142 /* Too short SA Query request frame */ 3143 return; 3144 } 3145 3146 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 3147 if (skb == NULL) 3148 return; 3149 3150 skb_reserve(skb, local->hw.extra_tx_headroom); 3151 resp = skb_put_zero(skb, 24); 3152 memcpy(resp->da, mgmt->sa, ETH_ALEN); 3153 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 3154 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 3155 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3156 IEEE80211_STYPE_ACTION); 3157 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 3158 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 3159 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 3160 memcpy(resp->u.action.u.sa_query.trans_id, 3161 mgmt->u.action.u.sa_query.trans_id, 3162 WLAN_SA_QUERY_TR_ID_LEN); 3163 3164 ieee80211_tx_skb(sdata, skb); 3165 } 3166 3167 static ieee80211_rx_result debug_noinline 3168 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3169 { 3170 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3171 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3172 3173 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) 3174 return RX_CONTINUE; 3175 3176 /* 3177 * From here on, look only at management frames. 3178 * Data and control frames are already handled, 3179 * and unknown (reserved) frames are useless. 
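	 *
	 * The management-frame test below boils down to the two type bits
	 * of the Frame Control field; a self-contained equivalent of
	 * ieee80211_is_mgmt() would be:
	 *
	 *	static bool fc_is_mgmt(__le16 fc)
	 *	{
	 *		return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
	 *		       cpu_to_le16(IEEE80211_FTYPE_MGMT);
	 *	}
	 *
	 * with control and data frames carrying different values in those
	 * bits.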
3180 */ 3181 if (rx->skb->len < 24) 3182 return RX_DROP_MONITOR; 3183 3184 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3185 return RX_DROP_MONITOR; 3186 3187 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3188 ieee80211_is_beacon(mgmt->frame_control) && 3189 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3190 int sig = 0; 3191 3192 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3193 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3194 sig = status->signal; 3195 3196 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy, 3197 rx->skb->data, rx->skb->len, 3198 ieee80211_rx_status_to_khz(status), 3199 sig); 3200 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3201 } 3202 3203 if (ieee80211_drop_unencrypted_mgmt(rx)) 3204 return RX_DROP_UNUSABLE; 3205 3206 return RX_CONTINUE; 3207 } 3208 3209 static ieee80211_rx_result debug_noinline 3210 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3211 { 3212 struct ieee80211_local *local = rx->local; 3213 struct ieee80211_sub_if_data *sdata = rx->sdata; 3214 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3215 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3216 int len = rx->skb->len; 3217 3218 if (!ieee80211_is_action(mgmt->frame_control)) 3219 return RX_CONTINUE; 3220 3221 /* drop too small frames */ 3222 if (len < IEEE80211_MIN_ACTION_SIZE) 3223 return RX_DROP_UNUSABLE; 3224 3225 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3226 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3227 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3228 return RX_DROP_UNUSABLE; 3229 3230 switch (mgmt->u.action.category) { 3231 case WLAN_CATEGORY_HT: 3232 /* reject HT action frames from stations not supporting HT */ 3233 if (!rx->sta->sta.ht_cap.ht_supported) 3234 goto invalid; 3235 3236 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3237 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3238 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3239 sdata->vif.type != NL80211_IFTYPE_AP && 3240 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3241 break; 3242 3243 /* verify action & smps_control/chanwidth are present */ 3244 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3245 goto invalid; 3246 3247 switch (mgmt->u.action.u.ht_smps.action) { 3248 case WLAN_HT_ACTION_SMPS: { 3249 struct ieee80211_supported_band *sband; 3250 enum ieee80211_smps_mode smps_mode; 3251 struct sta_opmode_info sta_opmode = {}; 3252 3253 if (sdata->vif.type != NL80211_IFTYPE_AP && 3254 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 3255 goto handled; 3256 3257 /* convert to HT capability */ 3258 switch (mgmt->u.action.u.ht_smps.smps_control) { 3259 case WLAN_HT_SMPS_CONTROL_DISABLED: 3260 smps_mode = IEEE80211_SMPS_OFF; 3261 break; 3262 case WLAN_HT_SMPS_CONTROL_STATIC: 3263 smps_mode = IEEE80211_SMPS_STATIC; 3264 break; 3265 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3266 smps_mode = IEEE80211_SMPS_DYNAMIC; 3267 break; 3268 default: 3269 goto invalid; 3270 } 3271 3272 /* if no change do nothing */ 3273 if (rx->sta->sta.smps_mode == smps_mode) 3274 goto handled; 3275 rx->sta->sta.smps_mode = smps_mode; 3276 sta_opmode.smps_mode = 3277 ieee80211_smps_mode_to_smps_mode(smps_mode); 3278 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3279 3280 sband = rx->local->hw.wiphy->bands[status->band]; 3281 3282 rate_control_rate_update(local, sband, rx->sta, 3283 IEEE80211_RC_SMPS_CHANGED); 3284 cfg80211_sta_opmode_change_notify(sdata->dev, 3285 rx->sta->addr, 3286 &sta_opmode, 3287 GFP_ATOMIC); 3288 goto handled; 3289 } 3290 case 
WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3291 struct ieee80211_supported_band *sband; 3292 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3293 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 3294 struct sta_opmode_info sta_opmode = {}; 3295 3296 /* If it doesn't support 40 MHz it can't change ... */ 3297 if (!(rx->sta->sta.ht_cap.cap & 3298 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3299 goto handled; 3300 3301 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 3302 max_bw = IEEE80211_STA_RX_BW_20; 3303 else 3304 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 3305 3306 /* set cur_max_bandwidth and recalc sta bw */ 3307 rx->sta->cur_max_bandwidth = max_bw; 3308 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 3309 3310 if (rx->sta->sta.bandwidth == new_bw) 3311 goto handled; 3312 3313 rx->sta->sta.bandwidth = new_bw; 3314 sband = rx->local->hw.wiphy->bands[status->band]; 3315 sta_opmode.bw = 3316 ieee80211_sta_rx_bw_to_chan_width(rx->sta); 3317 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; 3318 3319 rate_control_rate_update(local, sband, rx->sta, 3320 IEEE80211_RC_BW_CHANGED); 3321 cfg80211_sta_opmode_change_notify(sdata->dev, 3322 rx->sta->addr, 3323 &sta_opmode, 3324 GFP_ATOMIC); 3325 goto handled; 3326 } 3327 default: 3328 goto invalid; 3329 } 3330 3331 break; 3332 case WLAN_CATEGORY_PUBLIC: 3333 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3334 goto invalid; 3335 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3336 break; 3337 if (!rx->sta) 3338 break; 3339 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 3340 break; 3341 if (mgmt->u.action.u.ext_chan_switch.action_code != 3342 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3343 break; 3344 if (len < offsetof(struct ieee80211_mgmt, 3345 u.action.u.ext_chan_switch.variable)) 3346 goto invalid; 3347 goto queue; 3348 case WLAN_CATEGORY_VHT: 3349 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3350 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3351 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3352 sdata->vif.type != NL80211_IFTYPE_AP && 3353 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3354 break; 3355 3356 /* verify action code is present */ 3357 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3358 goto invalid; 3359 3360 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3361 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3362 /* verify opmode is present */ 3363 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3364 goto invalid; 3365 goto queue; 3366 } 3367 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3368 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3369 goto invalid; 3370 goto queue; 3371 } 3372 default: 3373 break; 3374 } 3375 break; 3376 case WLAN_CATEGORY_BACK: 3377 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3378 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3379 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3380 sdata->vif.type != NL80211_IFTYPE_AP && 3381 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3382 break; 3383 3384 /* verify action_code is present */ 3385 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3386 break; 3387 3388 switch (mgmt->u.action.u.addba_req.action_code) { 3389 case WLAN_ACTION_ADDBA_REQ: 3390 if (len < (IEEE80211_MIN_ACTION_SIZE + 3391 sizeof(mgmt->u.action.u.addba_req))) 3392 goto invalid; 3393 break; 3394 case WLAN_ACTION_ADDBA_RESP: 3395 if (len < (IEEE80211_MIN_ACTION_SIZE + 3396 sizeof(mgmt->u.action.u.addba_resp))) 3397 goto invalid; 3398 break; 3399 case WLAN_ACTION_DELBA: 3400 if (len < (IEEE80211_MIN_ACTION_SIZE + 3401 sizeof(mgmt->u.action.u.delba))) 3402 goto invalid; 3403 break; 3404 default: 3405 goto invalid; 3406 } 3407 3408 goto queue; 3409 case 
WLAN_CATEGORY_SPECTRUM_MGMT: 3410 /* verify action_code is present */ 3411 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3412 break; 3413 3414 switch (mgmt->u.action.u.measurement.action_code) { 3415 case WLAN_ACTION_SPCT_MSR_REQ: 3416 if (status->band != NL80211_BAND_5GHZ) 3417 break; 3418 3419 if (len < (IEEE80211_MIN_ACTION_SIZE + 3420 sizeof(mgmt->u.action.u.measurement))) 3421 break; 3422 3423 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3424 break; 3425 3426 ieee80211_process_measurement_req(sdata, mgmt, len); 3427 goto handled; 3428 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3429 u8 *bssid; 3430 if (len < (IEEE80211_MIN_ACTION_SIZE + 3431 sizeof(mgmt->u.action.u.chan_switch))) 3432 break; 3433 3434 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3435 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3436 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3437 break; 3438 3439 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3440 bssid = sdata->u.mgd.bssid; 3441 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3442 bssid = sdata->u.ibss.bssid; 3443 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3444 bssid = mgmt->sa; 3445 else 3446 break; 3447 3448 if (!ether_addr_equal(mgmt->bssid, bssid)) 3449 break; 3450 3451 goto queue; 3452 } 3453 } 3454 break; 3455 case WLAN_CATEGORY_SELF_PROTECTED: 3456 if (len < (IEEE80211_MIN_ACTION_SIZE + 3457 sizeof(mgmt->u.action.u.self_prot.action_code))) 3458 break; 3459 3460 switch (mgmt->u.action.u.self_prot.action_code) { 3461 case WLAN_SP_MESH_PEERING_OPEN: 3462 case WLAN_SP_MESH_PEERING_CLOSE: 3463 case WLAN_SP_MESH_PEERING_CONFIRM: 3464 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3465 goto invalid; 3466 if (sdata->u.mesh.user_mpm) 3467 /* userspace handles this frame */ 3468 break; 3469 goto queue; 3470 case WLAN_SP_MGK_INFORM: 3471 case WLAN_SP_MGK_ACK: 3472 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3473 goto invalid; 3474 break; 3475 } 3476 break; 3477 case WLAN_CATEGORY_MESH_ACTION: 3478 if (len < (IEEE80211_MIN_ACTION_SIZE + 3479 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3480 break; 3481 3482 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3483 break; 3484 if (mesh_action_is_path_sel(mgmt) && 3485 !mesh_path_sel_is_hwmp(sdata)) 3486 break; 3487 goto queue; 3488 } 3489 3490 return RX_CONTINUE; 3491 3492 invalid: 3493 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3494 /* will return in the next handlers */ 3495 return RX_CONTINUE; 3496 3497 handled: 3498 if (rx->sta) 3499 rx->sta->rx_stats.packets++; 3500 dev_kfree_skb(rx->skb); 3501 return RX_QUEUED; 3502 3503 queue: 3504 ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); 3505 return RX_QUEUED; 3506 } 3507 3508 static ieee80211_rx_result debug_noinline 3509 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3510 { 3511 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3512 int sig = 0; 3513 3514 /* skip known-bad action frames and return them in the next handler */ 3515 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3516 return RX_CONTINUE; 3517 3518 /* 3519 * Getting here means the kernel doesn't know how to handle 3520 * it, but maybe userspace does ... include returned frames 3521 * so userspace can register for those to know whether ones 3522 * it transmitted were processed or returned. 
3523 */ 3524 3525 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3526 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3527 sig = status->signal; 3528 3529 if (cfg80211_rx_mgmt_khz(&rx->sdata->wdev, 3530 ieee80211_rx_status_to_khz(status), sig, 3531 rx->skb->data, rx->skb->len, 0)) { 3532 if (rx->sta) 3533 rx->sta->rx_stats.packets++; 3534 dev_kfree_skb(rx->skb); 3535 return RX_QUEUED; 3536 } 3537 3538 return RX_CONTINUE; 3539 } 3540 3541 static ieee80211_rx_result debug_noinline 3542 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) 3543 { 3544 struct ieee80211_sub_if_data *sdata = rx->sdata; 3545 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3546 int len = rx->skb->len; 3547 3548 if (!ieee80211_is_action(mgmt->frame_control)) 3549 return RX_CONTINUE; 3550 3551 switch (mgmt->u.action.category) { 3552 case WLAN_CATEGORY_SA_QUERY: 3553 if (len < (IEEE80211_MIN_ACTION_SIZE + 3554 sizeof(mgmt->u.action.u.sa_query))) 3555 break; 3556 3557 switch (mgmt->u.action.u.sa_query.action) { 3558 case WLAN_ACTION_SA_QUERY_REQUEST: 3559 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3560 break; 3561 ieee80211_process_sa_query_req(sdata, mgmt, len); 3562 goto handled; 3563 } 3564 break; 3565 } 3566 3567 return RX_CONTINUE; 3568 3569 handled: 3570 if (rx->sta) 3571 rx->sta->rx_stats.packets++; 3572 dev_kfree_skb(rx->skb); 3573 return RX_QUEUED; 3574 } 3575 3576 static ieee80211_rx_result debug_noinline 3577 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 3578 { 3579 struct ieee80211_local *local = rx->local; 3580 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3581 struct sk_buff *nskb; 3582 struct ieee80211_sub_if_data *sdata = rx->sdata; 3583 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3584 3585 if (!ieee80211_is_action(mgmt->frame_control)) 3586 return RX_CONTINUE; 3587 3588 /* 3589 * For AP mode, hostapd is responsible for handling any action 3590 * frames that we didn't handle, including returning unknown 3591 * ones. For all other modes we will return them to the sender, 3592 * setting the 0x80 bit in the action category, as required by 3593 * 802.11-2012 9.24.4. 3594 * Newer versions of hostapd shall also use the management frame 3595 * registration mechanisms, but older ones still use cooked 3596 * monitor interfaces so push all frames there. 
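	 *
	 * The "returned frame" convention is just the top bit of the
	 * category octet. A sketch of both ends of it (the macro name is
	 * made up; the value comes from the standard):
	 *
	 *	#define ACTION_CAT_REJECTED_BIT	0x80
	 *
	 *	marking a frame we bounce back:
	 *		nmgmt->u.action.category |= ACTION_CAT_REJECTED_BIT;
	 *	never bouncing back a frame that is already marked:
	 *		if (mgmt->u.action.category & ACTION_CAT_REJECTED_BIT)
	 *			drop it instead;
	 *
	 * The second check, done just below, keeps two peers from
	 * ping-ponging the same rejected Action frame forever.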
3597 */ 3598 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 3599 (sdata->vif.type == NL80211_IFTYPE_AP || 3600 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 3601 return RX_DROP_MONITOR; 3602 3603 if (is_multicast_ether_addr(mgmt->da)) 3604 return RX_DROP_MONITOR; 3605 3606 /* do not return rejected action frames */ 3607 if (mgmt->u.action.category & 0x80) 3608 return RX_DROP_UNUSABLE; 3609 3610 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3611 GFP_ATOMIC); 3612 if (nskb) { 3613 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3614 3615 nmgmt->u.action.category |= 0x80; 3616 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3617 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3618 3619 memset(nskb->cb, 0, sizeof(nskb->cb)); 3620 3621 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3622 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3623 3624 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3625 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3626 IEEE80211_TX_CTL_NO_CCK_RATE; 3627 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3628 info->hw_queue = 3629 local->hw.offchannel_tx_hw_queue; 3630 } 3631 3632 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3633 status->band); 3634 } 3635 dev_kfree_skb(rx->skb); 3636 return RX_QUEUED; 3637 } 3638 3639 static ieee80211_rx_result debug_noinline 3640 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) 3641 { 3642 struct ieee80211_sub_if_data *sdata = rx->sdata; 3643 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 3644 3645 if (!ieee80211_is_ext(hdr->frame_control)) 3646 return RX_CONTINUE; 3647 3648 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3649 return RX_DROP_MONITOR; 3650 3651 /* for now only beacons are ext, so queue them */ 3652 ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); 3653 3654 return RX_QUEUED; 3655 } 3656 3657 static ieee80211_rx_result debug_noinline 3658 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3659 { 3660 struct ieee80211_sub_if_data *sdata = rx->sdata; 3661 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3662 __le16 stype; 3663 3664 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3665 3666 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3667 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3668 sdata->vif.type != NL80211_IFTYPE_OCB && 3669 sdata->vif.type != NL80211_IFTYPE_STATION) 3670 return RX_DROP_MONITOR; 3671 3672 switch (stype) { 3673 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3674 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3675 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3676 /* process for all: mesh, mlme, ibss */ 3677 break; 3678 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3679 if (is_multicast_ether_addr(mgmt->da) && 3680 !is_broadcast_ether_addr(mgmt->da)) 3681 return RX_DROP_MONITOR; 3682 3683 /* process only for station/IBSS */ 3684 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3685 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3686 return RX_DROP_MONITOR; 3687 break; 3688 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3689 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3690 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3691 if (is_multicast_ether_addr(mgmt->da) && 3692 !is_broadcast_ether_addr(mgmt->da)) 3693 return RX_DROP_MONITOR; 3694 3695 /* process only for station */ 3696 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3697 return RX_DROP_MONITOR; 3698 break; 3699 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3700 /* process only for ibss and mesh */ 3701 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3702 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3703 
return RX_DROP_MONITOR; 3704 break; 3705 default: 3706 return RX_DROP_MONITOR; 3707 } 3708 3709 ieee80211_queue_skb_to_iface(sdata, rx->sta, rx->skb); 3710 3711 return RX_QUEUED; 3712 } 3713 3714 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3715 struct ieee80211_rate *rate) 3716 { 3717 struct ieee80211_sub_if_data *sdata; 3718 struct ieee80211_local *local = rx->local; 3719 struct sk_buff *skb = rx->skb, *skb2; 3720 struct net_device *prev_dev = NULL; 3721 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3722 int needed_headroom; 3723 3724 /* 3725 * If cooked monitor has been processed already, then 3726 * don't do it again. If not, set the flag. 3727 */ 3728 if (rx->flags & IEEE80211_RX_CMNTR) 3729 goto out_free_skb; 3730 rx->flags |= IEEE80211_RX_CMNTR; 3731 3732 /* If there are no cooked monitor interfaces, just free the SKB */ 3733 if (!local->cooked_mntrs) 3734 goto out_free_skb; 3735 3736 /* vendor data is long removed here */ 3737 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3738 /* room for the radiotap header based on driver features */ 3739 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3740 3741 if (skb_headroom(skb) < needed_headroom && 3742 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3743 goto out_free_skb; 3744 3745 /* prepend radiotap information */ 3746 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3747 false); 3748 3749 skb_reset_mac_header(skb); 3750 skb->ip_summed = CHECKSUM_UNNECESSARY; 3751 skb->pkt_type = PACKET_OTHERHOST; 3752 skb->protocol = htons(ETH_P_802_2); 3753 3754 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3755 if (!ieee80211_sdata_running(sdata)) 3756 continue; 3757 3758 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3759 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) 3760 continue; 3761 3762 if (prev_dev) { 3763 skb2 = skb_clone(skb, GFP_ATOMIC); 3764 if (skb2) { 3765 skb2->dev = prev_dev; 3766 netif_receive_skb(skb2); 3767 } 3768 } 3769 3770 prev_dev = sdata->dev; 3771 dev_sw_netstats_rx_add(sdata->dev, skb->len); 3772 } 3773 3774 if (prev_dev) { 3775 skb->dev = prev_dev; 3776 netif_receive_skb(skb); 3777 return; 3778 } 3779 3780 out_free_skb: 3781 dev_kfree_skb(skb); 3782 } 3783 3784 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3785 ieee80211_rx_result res) 3786 { 3787 switch (res) { 3788 case RX_DROP_MONITOR: 3789 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3790 if (rx->sta) 3791 rx->sta->rx_stats.dropped++; 3792 fallthrough; 3793 case RX_CONTINUE: { 3794 struct ieee80211_rate *rate = NULL; 3795 struct ieee80211_supported_band *sband; 3796 struct ieee80211_rx_status *status; 3797 3798 status = IEEE80211_SKB_RXCB((rx->skb)); 3799 3800 sband = rx->local->hw.wiphy->bands[status->band]; 3801 if (status->encoding == RX_ENC_LEGACY) 3802 rate = &sband->bitrates[status->rate_idx]; 3803 3804 ieee80211_rx_cooked_monitor(rx, rate); 3805 break; 3806 } 3807 case RX_DROP_UNUSABLE: 3808 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3809 if (rx->sta) 3810 rx->sta->rx_stats.dropped++; 3811 dev_kfree_skb(rx->skb); 3812 break; 3813 case RX_QUEUED: 3814 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3815 break; 3816 } 3817 } 3818 3819 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3820 struct sk_buff_head *frames) 3821 { 3822 ieee80211_rx_result res = RX_DROP_MONITOR; 3823 struct sk_buff *skb; 3824 3825 #define CALL_RXH(rxh) \ 3826 do { \ 3827 res = rxh(rx); \ 3828 if (res != RX_CONTINUE) \ 3829 
goto rxh_next; \ 3830 } while (0) 3831 3832 /* Lock here to avoid hitting all of the data used in the RX 3833 * path (e.g. key data, station data, ...) concurrently when 3834 * a frame is released from the reorder buffer due to timeout 3835 * from the timer, potentially concurrently with RX from the 3836 * driver. 3837 */ 3838 spin_lock_bh(&rx->local->rx_path_lock); 3839 3840 while ((skb = __skb_dequeue(frames))) { 3841 /* 3842 * all the other fields are valid across frames 3843 * that belong to an aMPDU since they are on the 3844 * same TID from the same station 3845 */ 3846 rx->skb = skb; 3847 3848 CALL_RXH(ieee80211_rx_h_check_more_data); 3849 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 3850 CALL_RXH(ieee80211_rx_h_sta_process); 3851 CALL_RXH(ieee80211_rx_h_decrypt); 3852 CALL_RXH(ieee80211_rx_h_defragment); 3853 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 3854 /* must be after MMIC verify so header is counted in MPDU mic */ 3855 #ifdef CONFIG_MAC80211_MESH 3856 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3857 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3858 #endif 3859 CALL_RXH(ieee80211_rx_h_amsdu); 3860 CALL_RXH(ieee80211_rx_h_data); 3861 3862 /* special treatment -- needs the queue */ 3863 res = ieee80211_rx_h_ctrl(rx, frames); 3864 if (res != RX_CONTINUE) 3865 goto rxh_next; 3866 3867 CALL_RXH(ieee80211_rx_h_mgmt_check); 3868 CALL_RXH(ieee80211_rx_h_action); 3869 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 3870 CALL_RXH(ieee80211_rx_h_action_post_userspace); 3871 CALL_RXH(ieee80211_rx_h_action_return); 3872 CALL_RXH(ieee80211_rx_h_ext); 3873 CALL_RXH(ieee80211_rx_h_mgmt); 3874 3875 rxh_next: 3876 ieee80211_rx_handlers_result(rx, res); 3877 3878 #undef CALL_RXH 3879 } 3880 3881 spin_unlock_bh(&rx->local->rx_path_lock); 3882 } 3883 3884 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3885 { 3886 struct sk_buff_head reorder_release; 3887 ieee80211_rx_result res = RX_DROP_MONITOR; 3888 3889 __skb_queue_head_init(&reorder_release); 3890 3891 #define CALL_RXH(rxh) \ 3892 do { \ 3893 res = rxh(rx); \ 3894 if (res != RX_CONTINUE) \ 3895 goto rxh_next; \ 3896 } while (0) 3897 3898 CALL_RXH(ieee80211_rx_h_check_dup); 3899 CALL_RXH(ieee80211_rx_h_check); 3900 3901 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3902 3903 ieee80211_rx_handlers(rx, &reorder_release); 3904 return; 3905 3906 rxh_next: 3907 ieee80211_rx_handlers_result(rx, res); 3908 3909 #undef CALL_RXH 3910 } 3911 3912 /* 3913 * This function makes calls into the RX path, therefore 3914 * it has to be invoked under RCU read lock. 
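 *
 * A minimal calling sketch (the block-ack reorder timer handler does
 * the equivalent of this; shown here only for illustration):
 *
 *	rcu_read_lock();
 *	ieee80211_release_reorder_timeout(sta, tid);
 *	rcu_read_unlock();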
3915 */ 3916 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3917 { 3918 struct sk_buff_head frames; 3919 struct ieee80211_rx_data rx = { 3920 .sta = sta, 3921 .sdata = sta->sdata, 3922 .local = sta->local, 3923 /* This is OK -- must be QoS data frame */ 3924 .security_idx = tid, 3925 .seqno_idx = tid, 3926 }; 3927 struct tid_ampdu_rx *tid_agg_rx; 3928 3929 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3930 if (!tid_agg_rx) 3931 return; 3932 3933 __skb_queue_head_init(&frames); 3934 3935 spin_lock(&tid_agg_rx->reorder_lock); 3936 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3937 spin_unlock(&tid_agg_rx->reorder_lock); 3938 3939 if (!skb_queue_empty(&frames)) { 3940 struct ieee80211_event event = { 3941 .type = BA_FRAME_TIMEOUT, 3942 .u.ba.tid = tid, 3943 .u.ba.sta = &sta->sta, 3944 }; 3945 drv_event_callback(rx.local, rx.sdata, &event); 3946 } 3947 3948 ieee80211_rx_handlers(&rx, &frames); 3949 } 3950 3951 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 3952 u16 ssn, u64 filtered, 3953 u16 received_mpdus) 3954 { 3955 struct sta_info *sta; 3956 struct tid_ampdu_rx *tid_agg_rx; 3957 struct sk_buff_head frames; 3958 struct ieee80211_rx_data rx = { 3959 /* This is OK -- must be QoS data frame */ 3960 .security_idx = tid, 3961 .seqno_idx = tid, 3962 }; 3963 int i, diff; 3964 3965 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 3966 return; 3967 3968 __skb_queue_head_init(&frames); 3969 3970 sta = container_of(pubsta, struct sta_info, sta); 3971 3972 rx.sta = sta; 3973 rx.sdata = sta->sdata; 3974 rx.local = sta->local; 3975 3976 rcu_read_lock(); 3977 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3978 if (!tid_agg_rx) 3979 goto out; 3980 3981 spin_lock_bh(&tid_agg_rx->reorder_lock); 3982 3983 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 3984 int release; 3985 3986 /* release all frames in the reorder buffer */ 3987 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 3988 IEEE80211_SN_MODULO; 3989 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 3990 release, &frames); 3991 /* update ssn to match received ssn */ 3992 tid_agg_rx->head_seq_num = ssn; 3993 } else { 3994 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 3995 &frames); 3996 } 3997 3998 /* handle the case that received ssn is behind the mac ssn. 
3999 * it can be tid_agg_rx->buf_size behind and still be valid */ 4000 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 4001 if (diff >= tid_agg_rx->buf_size) { 4002 tid_agg_rx->reorder_buf_filtered = 0; 4003 goto release; 4004 } 4005 filtered = filtered >> diff; 4006 ssn += diff; 4007 4008 /* update bitmap */ 4009 for (i = 0; i < tid_agg_rx->buf_size; i++) { 4010 int index = (ssn + i) % tid_agg_rx->buf_size; 4011 4012 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 4013 if (filtered & BIT_ULL(i)) 4014 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 4015 } 4016 4017 /* now process also frames that the filter marking released */ 4018 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4019 4020 release: 4021 spin_unlock_bh(&tid_agg_rx->reorder_lock); 4022 4023 ieee80211_rx_handlers(&rx, &frames); 4024 4025 out: 4026 rcu_read_unlock(); 4027 } 4028 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 4029 4030 /* main receive path */ 4031 4032 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 4033 { 4034 struct ieee80211_sub_if_data *sdata = rx->sdata; 4035 struct sk_buff *skb = rx->skb; 4036 struct ieee80211_hdr *hdr = (void *)skb->data; 4037 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4038 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 4039 bool multicast = is_multicast_ether_addr(hdr->addr1) || 4040 ieee80211_is_s1g_beacon(hdr->frame_control); 4041 4042 switch (sdata->vif.type) { 4043 case NL80211_IFTYPE_STATION: 4044 if (!bssid && !sdata->u.mgd.use_4addr) 4045 return false; 4046 if (ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) 4047 return false; 4048 if (multicast) 4049 return true; 4050 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4051 case NL80211_IFTYPE_ADHOC: 4052 if (!bssid) 4053 return false; 4054 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 4055 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 4056 return false; 4057 if (ieee80211_is_beacon(hdr->frame_control)) 4058 return true; 4059 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 4060 return false; 4061 if (!multicast && 4062 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4063 return false; 4064 if (!rx->sta) { 4065 int rate_idx; 4066 if (status->encoding != RX_ENC_LEGACY) 4067 rate_idx = 0; /* TODO: HT/VHT rates */ 4068 else 4069 rate_idx = status->rate_idx; 4070 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 4071 BIT(rate_idx)); 4072 } 4073 return true; 4074 case NL80211_IFTYPE_OCB: 4075 if (!bssid) 4076 return false; 4077 if (!ieee80211_is_data_present(hdr->frame_control)) 4078 return false; 4079 if (!is_broadcast_ether_addr(bssid)) 4080 return false; 4081 if (!multicast && 4082 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 4083 return false; 4084 if (!rx->sta) { 4085 int rate_idx; 4086 if (status->encoding != RX_ENC_LEGACY) 4087 rate_idx = 0; /* TODO: HT rates */ 4088 else 4089 rate_idx = status->rate_idx; 4090 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 4091 BIT(rate_idx)); 4092 } 4093 return true; 4094 case NL80211_IFTYPE_MESH_POINT: 4095 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 4096 return false; 4097 if (multicast) 4098 return true; 4099 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4100 case NL80211_IFTYPE_AP_VLAN: 4101 case NL80211_IFTYPE_AP: 4102 if (!bssid) 4103 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4104 4105 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 4106 /* 4107 * Accept public action frames even when the 4108 * BSSID doesn't match, this is 
used for P2P 4109 * and location updates. Note that mac80211 4110 * itself never looks at these frames. 4111 */ 4112 if (!multicast && 4113 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4114 return false; 4115 if (ieee80211_is_public_action(hdr, skb->len)) 4116 return true; 4117 return ieee80211_is_beacon(hdr->frame_control); 4118 } 4119 4120 if (!ieee80211_has_tods(hdr->frame_control)) { 4121 /* ignore data frames to TDLS-peers */ 4122 if (ieee80211_is_data(hdr->frame_control)) 4123 return false; 4124 /* ignore action frames to TDLS-peers */ 4125 if (ieee80211_is_action(hdr->frame_control) && 4126 !is_broadcast_ether_addr(bssid) && 4127 !ether_addr_equal(bssid, hdr->addr1)) 4128 return false; 4129 } 4130 4131 /* 4132 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 4133 * the BSSID - we've checked that already but may have accepted 4134 * the wildcard (ff:ff:ff:ff:ff:ff). 4135 * 4136 * It also says: 4137 * The BSSID of the Data frame is determined as follows: 4138 * a) If the STA is contained within an AP or is associated 4139 * with an AP, the BSSID is the address currently in use 4140 * by the STA contained in the AP. 4141 * 4142 * So we should not accept data frames with an address that's 4143 * multicast. 4144 * 4145 * Accepting it also opens a security problem because stations 4146 * could encrypt it with the GTK and inject traffic that way. 4147 */ 4148 if (ieee80211_is_data(hdr->frame_control) && multicast) 4149 return false; 4150 4151 return true; 4152 case NL80211_IFTYPE_P2P_DEVICE: 4153 return ieee80211_is_public_action(hdr, skb->len) || 4154 ieee80211_is_probe_req(hdr->frame_control) || 4155 ieee80211_is_probe_resp(hdr->frame_control) || 4156 ieee80211_is_beacon(hdr->frame_control); 4157 case NL80211_IFTYPE_NAN: 4158 /* Currently no frames on NAN interface are allowed */ 4159 return false; 4160 default: 4161 break; 4162 } 4163 4164 WARN_ON_ONCE(1); 4165 return false; 4166 } 4167 4168 void ieee80211_check_fast_rx(struct sta_info *sta) 4169 { 4170 struct ieee80211_sub_if_data *sdata = sta->sdata; 4171 struct ieee80211_local *local = sdata->local; 4172 struct ieee80211_key *key; 4173 struct ieee80211_fast_rx fastrx = { 4174 .dev = sdata->dev, 4175 .vif_type = sdata->vif.type, 4176 .control_port_protocol = sdata->control_port_protocol, 4177 }, *old, *new = NULL; 4178 bool set_offload = false; 4179 bool assign = false; 4180 bool offload; 4181 4182 /* use sparse to check that we don't return without updating */ 4183 __acquire(check_fast_rx); 4184 4185 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 4186 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 4187 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 4188 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 4189 4190 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 4191 4192 /* fast-rx doesn't do reordering */ 4193 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 4194 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 4195 goto clear; 4196 4197 switch (sdata->vif.type) { 4198 case NL80211_IFTYPE_STATION: 4199 if (sta->sta.tdls) { 4200 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4201 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4202 fastrx.expected_ds_bits = 0; 4203 } else { 4204 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4205 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 4206 fastrx.expected_ds_bits = 4207 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4208 } 4209 4210 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 4211 
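		/*
		 * 4-address (WDS/AP-client) mode: frames from the AP have
		 * both FromDS and ToDS set, so the real DA/SA are carried
		 * in addr3/addr4 instead of addr1/addr3.
		 */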
fastrx.expected_ds_bits |= 4212 cpu_to_le16(IEEE80211_FCTL_TODS); 4213 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4214 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4215 } 4216 4217 if (!sdata->u.mgd.powersave) 4218 break; 4219 4220 /* software powersave is a huge mess, avoid all of it */ 4221 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 4222 goto clear; 4223 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 4224 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 4225 goto clear; 4226 break; 4227 case NL80211_IFTYPE_AP_VLAN: 4228 case NL80211_IFTYPE_AP: 4229 /* parallel-rx requires this, at least with calls to 4230 * ieee80211_sta_ps_transition() 4231 */ 4232 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4233 goto clear; 4234 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4235 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4236 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4237 4238 fastrx.internal_forward = 4239 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4240 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4241 !sdata->u.vlan.sta); 4242 4243 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4244 sdata->u.vlan.sta) { 4245 fastrx.expected_ds_bits |= 4246 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4247 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4248 fastrx.internal_forward = 0; 4249 } 4250 4251 break; 4252 default: 4253 goto clear; 4254 } 4255 4256 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4257 goto clear; 4258 4259 rcu_read_lock(); 4260 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4261 if (!key) 4262 key = rcu_dereference(sdata->default_unicast_key); 4263 if (key) { 4264 switch (key->conf.cipher) { 4265 case WLAN_CIPHER_SUITE_TKIP: 4266 /* we don't want to deal with MMIC in fast-rx */ 4267 goto clear_rcu; 4268 case WLAN_CIPHER_SUITE_CCMP: 4269 case WLAN_CIPHER_SUITE_CCMP_256: 4270 case WLAN_CIPHER_SUITE_GCMP: 4271 case WLAN_CIPHER_SUITE_GCMP_256: 4272 break; 4273 default: 4274 /* We also don't want to deal with 4275 * WEP or cipher scheme. 
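			 * Fast-rx only strips a fixed CCMP/GCMP-sized
			 * IV and ICV from hardware-decrypted frames, so
			 * anything else has to take the regular RX path.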
4276 */ 4277 goto clear_rcu; 4278 } 4279 4280 fastrx.key = true; 4281 fastrx.icv_len = key->conf.icv_len; 4282 } 4283 4284 assign = true; 4285 clear_rcu: 4286 rcu_read_unlock(); 4287 clear: 4288 __release(check_fast_rx); 4289 4290 if (assign) 4291 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4292 4293 offload = assign && 4294 (sdata->vif.offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED); 4295 4296 if (offload) 4297 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4298 else 4299 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4300 4301 if (set_offload) 4302 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign); 4303 4304 spin_lock_bh(&sta->lock); 4305 old = rcu_dereference_protected(sta->fast_rx, true); 4306 rcu_assign_pointer(sta->fast_rx, new); 4307 spin_unlock_bh(&sta->lock); 4308 4309 if (old) 4310 kfree_rcu(old, rcu_head); 4311 } 4312 4313 void ieee80211_clear_fast_rx(struct sta_info *sta) 4314 { 4315 struct ieee80211_fast_rx *old; 4316 4317 spin_lock_bh(&sta->lock); 4318 old = rcu_dereference_protected(sta->fast_rx, true); 4319 RCU_INIT_POINTER(sta->fast_rx, NULL); 4320 spin_unlock_bh(&sta->lock); 4321 4322 if (old) 4323 kfree_rcu(old, rcu_head); 4324 } 4325 4326 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4327 { 4328 struct ieee80211_local *local = sdata->local; 4329 struct sta_info *sta; 4330 4331 lockdep_assert_held(&local->sta_mtx); 4332 4333 list_for_each_entry(sta, &local->sta_list, list) { 4334 if (sdata != sta->sdata && 4335 (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4336 continue; 4337 ieee80211_check_fast_rx(sta); 4338 } 4339 } 4340 4341 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4342 { 4343 struct ieee80211_local *local = sdata->local; 4344 4345 mutex_lock(&local->sta_mtx); 4346 __ieee80211_check_fast_rx_iface(sdata); 4347 mutex_unlock(&local->sta_mtx); 4348 } 4349 4350 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4351 struct ieee80211_fast_rx *fast_rx, 4352 int orig_len) 4353 { 4354 struct ieee80211_sta_rx_stats *stats; 4355 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4356 struct sta_info *sta = rx->sta; 4357 struct sk_buff *skb = rx->skb; 4358 void *sa = skb->data + ETH_ALEN; 4359 void *da = skb->data; 4360 4361 stats = &sta->rx_stats; 4362 if (fast_rx->uses_rss) 4363 stats = this_cpu_ptr(sta->pcpu_rx_stats); 4364 4365 /* statistics part of ieee80211_rx_h_sta_process() */ 4366 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4367 stats->last_signal = status->signal; 4368 if (!fast_rx->uses_rss) 4369 ewma_signal_add(&sta->rx_stats_avg.signal, 4370 -status->signal); 4371 } 4372 4373 if (status->chains) { 4374 int i; 4375 4376 stats->chains = status->chains; 4377 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4378 int signal = status->chain_signal[i]; 4379 4380 if (!(status->chains & BIT(i))) 4381 continue; 4382 4383 stats->chain_signal_last[i] = signal; 4384 if (!fast_rx->uses_rss) 4385 ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], 4386 -signal); 4387 } 4388 } 4389 /* end of statistics */ 4390 4391 stats->last_rx = jiffies; 4392 stats->last_rate = sta_stats_encode_rate(status); 4393 4394 stats->fragments++; 4395 stats->packets++; 4396 4397 skb->dev = fast_rx->dev; 4398 4399 dev_sw_netstats_rx_add(fast_rx->dev, skb->len); 4400 4401 /* The seqno index has the same property as needed 4402 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4403 * for non-QoS-data frames. 
Here we know it's a data 4404 * frame, so count MSDUs. 4405 */ 4406 u64_stats_update_begin(&stats->syncp); 4407 stats->msdu[rx->seqno_idx]++; 4408 stats->bytes += orig_len; 4409 u64_stats_update_end(&stats->syncp); 4410 4411 if (fast_rx->internal_forward) { 4412 struct sk_buff *xmit_skb = NULL; 4413 if (is_multicast_ether_addr(da)) { 4414 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4415 } else if (!ether_addr_equal(da, sa) && 4416 sta_info_get(rx->sdata, da)) { 4417 xmit_skb = skb; 4418 skb = NULL; 4419 } 4420 4421 if (xmit_skb) { 4422 /* 4423 * Send to wireless media and increase priority by 256 4424 * to keep the received priority instead of 4425 * reclassifying the frame (see cfg80211_classify8021d). 4426 */ 4427 xmit_skb->priority += 256; 4428 xmit_skb->protocol = htons(ETH_P_802_3); 4429 skb_reset_network_header(xmit_skb); 4430 skb_reset_mac_header(xmit_skb); 4431 dev_queue_xmit(xmit_skb); 4432 } 4433 4434 if (!skb) 4435 return; 4436 } 4437 4438 /* deliver to local stack */ 4439 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4440 memset(skb->cb, 0, sizeof(skb->cb)); 4441 if (rx->list) 4442 list_add_tail(&skb->list, rx->list); 4443 else 4444 netif_receive_skb(skb); 4445 4446 } 4447 4448 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4449 struct ieee80211_fast_rx *fast_rx) 4450 { 4451 struct sk_buff *skb = rx->skb; 4452 struct ieee80211_hdr *hdr = (void *)skb->data; 4453 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4454 struct sta_info *sta = rx->sta; 4455 int orig_len = skb->len; 4456 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4457 int snap_offs = hdrlen; 4458 struct { 4459 u8 snap[sizeof(rfc1042_header)]; 4460 __be16 proto; 4461 } *payload __aligned(2); 4462 struct { 4463 u8 da[ETH_ALEN]; 4464 u8 sa[ETH_ALEN]; 4465 } addrs __aligned(2); 4466 struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; 4467 4468 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4469 * to a common data structure; drivers can implement that per queue 4470 * but we don't have that information in mac80211 4471 */ 4472 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4473 return false; 4474 4475 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4476 4477 /* If using encryption, we also need to have: 4478 * - PN_VALIDATED: similar, but the implementation is tricky 4479 * - DECRYPTED: necessary for PN_VALIDATED 4480 */ 4481 if (fast_rx->key && 4482 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4483 return false; 4484 4485 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4486 return false; 4487 4488 if (unlikely(ieee80211_is_frag(hdr))) 4489 return false; 4490 4491 /* Since our interface address cannot be multicast, this 4492 * implicitly also rejects multicast frames without the 4493 * explicit check. 4494 * 4495 * We shouldn't get any *data* frames not addressed to us 4496 * (AP mode will accept multicast *management* frames), but 4497 * punting here will make it go through the full checks in 4498 * ieee80211_accept_frame(). 
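	 * Returning false at this point simply hands the frame back to
	 * the regular (slower) RX machinery for the full set of checks.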
4499 */ 4500 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4501 return false; 4502 4503 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4504 IEEE80211_FCTL_TODS)) != 4505 fast_rx->expected_ds_bits) 4506 return false; 4507 4508 /* assign the key to drop unencrypted frames (later) 4509 * and strip the IV/MIC if necessary 4510 */ 4511 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4512 /* GCMP header length is the same */ 4513 snap_offs += IEEE80211_CCMP_HDR_LEN; 4514 } 4515 4516 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) { 4517 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4518 goto drop; 4519 4520 payload = (void *)(skb->data + snap_offs); 4521 4522 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 4523 return false; 4524 4525 /* Don't handle these here since they require special code. 4526 * Accept AARP and IPX even though they should come with a 4527 * bridge-tunnel header - but if we get them this way then 4528 * there's little point in discarding them. 4529 */ 4530 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 4531 payload->proto == fast_rx->control_port_protocol)) 4532 return false; 4533 } 4534 4535 /* after this point, don't punt to the slowpath! */ 4536 4537 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 4538 pskb_trim(skb, skb->len - fast_rx->icv_len)) 4539 goto drop; 4540 4541 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 4542 goto drop; 4543 4544 if (status->rx_flags & IEEE80211_RX_AMSDU) { 4545 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 4546 RX_QUEUED) 4547 goto drop; 4548 4549 return true; 4550 } 4551 4552 /* do the header conversion - first grab the addresses */ 4553 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 4554 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 4555 /* remove the SNAP but leave the ethertype */ 4556 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 4557 /* push the addresses in front */ 4558 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 4559 4560 ieee80211_rx_8023(rx, fast_rx, orig_len); 4561 4562 return true; 4563 drop: 4564 dev_kfree_skb(skb); 4565 if (fast_rx->uses_rss) 4566 stats = this_cpu_ptr(sta->pcpu_rx_stats); 4567 4568 stats->dropped++; 4569 return true; 4570 } 4571 4572 /* 4573 * This function returns whether or not the SKB 4574 * was destined for RX processing or not, which, 4575 * if consume is true, is equivalent to whether 4576 * or not the skb was consumed. 4577 */ 4578 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 4579 struct sk_buff *skb, bool consume) 4580 { 4581 struct ieee80211_local *local = rx->local; 4582 struct ieee80211_sub_if_data *sdata = rx->sdata; 4583 4584 rx->skb = skb; 4585 4586 /* See if we can do fast-rx; if we have to copy we already lost, 4587 * so punt in that case. We should never have to deliver a data 4588 * frame to multiple interfaces anyway. 4589 * 4590 * We skip the ieee80211_accept_frame() call and do the necessary 4591 * checking inside ieee80211_invoke_fast_rx(). 
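	 *
	 * Note that when consume is false the skb is copied before the
	 * RX handlers run, so the caller can keep offering the original
	 * frame to further stations/interfaces.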
4592 */ 4593 if (consume && rx->sta) { 4594 struct ieee80211_fast_rx *fast_rx; 4595 4596 fast_rx = rcu_dereference(rx->sta->fast_rx); 4597 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 4598 return true; 4599 } 4600 4601 if (!ieee80211_accept_frame(rx)) 4602 return false; 4603 4604 if (!consume) { 4605 skb = skb_copy(skb, GFP_ATOMIC); 4606 if (!skb) { 4607 if (net_ratelimit()) 4608 wiphy_debug(local->hw.wiphy, 4609 "failed to copy skb for %s\n", 4610 sdata->name); 4611 return true; 4612 } 4613 4614 rx->skb = skb; 4615 } 4616 4617 ieee80211_invoke_rx_handlers(rx); 4618 return true; 4619 } 4620 4621 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, 4622 struct ieee80211_sta *pubsta, 4623 struct sk_buff *skb, 4624 struct list_head *list) 4625 { 4626 struct ieee80211_local *local = hw_to_local(hw); 4627 struct ieee80211_fast_rx *fast_rx; 4628 struct ieee80211_rx_data rx; 4629 4630 memset(&rx, 0, sizeof(rx)); 4631 rx.skb = skb; 4632 rx.local = local; 4633 rx.list = list; 4634 4635 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 4636 4637 /* drop frame if too short for header */ 4638 if (skb->len < sizeof(struct ethhdr)) 4639 goto drop; 4640 4641 if (!pubsta) 4642 goto drop; 4643 4644 rx.sta = container_of(pubsta, struct sta_info, sta); 4645 rx.sdata = rx.sta->sdata; 4646 4647 fast_rx = rcu_dereference(rx.sta->fast_rx); 4648 if (!fast_rx) 4649 goto drop; 4650 4651 ieee80211_rx_8023(&rx, fast_rx, skb->len); 4652 return; 4653 4654 drop: 4655 dev_kfree_skb(skb); 4656 } 4657 4658 /* 4659 * This is the actual Rx frames handler. as it belongs to Rx path it must 4660 * be called with rcu_read_lock protection. 4661 */ 4662 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 4663 struct ieee80211_sta *pubsta, 4664 struct sk_buff *skb, 4665 struct list_head *list) 4666 { 4667 struct ieee80211_local *local = hw_to_local(hw); 4668 struct ieee80211_sub_if_data *sdata; 4669 struct ieee80211_hdr *hdr; 4670 __le16 fc; 4671 struct ieee80211_rx_data rx; 4672 struct ieee80211_sub_if_data *prev; 4673 struct rhlist_head *tmp; 4674 int err = 0; 4675 4676 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 4677 memset(&rx, 0, sizeof(rx)); 4678 rx.skb = skb; 4679 rx.local = local; 4680 rx.list = list; 4681 4682 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 4683 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 4684 4685 if (ieee80211_is_mgmt(fc)) { 4686 /* drop frame if too short for header */ 4687 if (skb->len < ieee80211_hdrlen(fc)) 4688 err = -ENOBUFS; 4689 else 4690 err = skb_linearize(skb); 4691 } else { 4692 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 4693 } 4694 4695 if (err) { 4696 dev_kfree_skb(skb); 4697 return; 4698 } 4699 4700 hdr = (struct ieee80211_hdr *)skb->data; 4701 ieee80211_parse_qos(&rx); 4702 ieee80211_verify_alignment(&rx); 4703 4704 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 4705 ieee80211_is_beacon(hdr->frame_control) || 4706 ieee80211_is_s1g_beacon(hdr->frame_control))) 4707 ieee80211_scan_rx(local, skb); 4708 4709 if (ieee80211_is_data(fc)) { 4710 struct sta_info *sta, *prev_sta; 4711 4712 if (pubsta) { 4713 rx.sta = container_of(pubsta, struct sta_info, sta); 4714 rx.sdata = rx.sta->sdata; 4715 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4716 return; 4717 goto out; 4718 } 4719 4720 prev_sta = NULL; 4721 4722 for_each_sta_info(local, hdr->addr2, sta, tmp) { 4723 if (!prev_sta) { 4724 prev_sta = sta; 4725 continue; 4726 } 4727 4728 rx.sta = prev_sta; 4729 rx.sdata = prev_sta->sdata; 4730 
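			/*
			 * Not the last matching station: let it process a
			 * copy (consume == false) and keep the original skb
			 * for the remaining candidates.
			 */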
ieee80211_prepare_and_rx_handle(&rx, skb, false); 4731 4732 prev_sta = sta; 4733 } 4734 4735 if (prev_sta) { 4736 rx.sta = prev_sta; 4737 rx.sdata = prev_sta->sdata; 4738 4739 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4740 return; 4741 goto out; 4742 } 4743 } 4744 4745 prev = NULL; 4746 4747 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 4748 if (!ieee80211_sdata_running(sdata)) 4749 continue; 4750 4751 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 4752 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 4753 continue; 4754 4755 /* 4756 * frame is destined for this interface, but if it's 4757 * not also for the previous one we handle that after 4758 * the loop to avoid copying the SKB once too much 4759 */ 4760 4761 if (!prev) { 4762 prev = sdata; 4763 continue; 4764 } 4765 4766 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4767 rx.sdata = prev; 4768 ieee80211_prepare_and_rx_handle(&rx, skb, false); 4769 4770 prev = sdata; 4771 } 4772 4773 if (prev) { 4774 rx.sta = sta_info_get_bss(prev, hdr->addr2); 4775 rx.sdata = prev; 4776 4777 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 4778 return; 4779 } 4780 4781 out: 4782 dev_kfree_skb(skb); 4783 } 4784 4785 /* 4786 * This is the receive path handler. It is called by a low level driver when an 4787 * 802.11 MPDU is received from the hardware. 4788 */ 4789 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4790 struct sk_buff *skb, struct list_head *list) 4791 { 4792 struct ieee80211_local *local = hw_to_local(hw); 4793 struct ieee80211_rate *rate = NULL; 4794 struct ieee80211_supported_band *sband; 4795 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4796 4797 WARN_ON_ONCE(softirq_count() == 0); 4798 4799 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 4800 goto drop; 4801 4802 sband = local->hw.wiphy->bands[status->band]; 4803 if (WARN_ON(!sband)) 4804 goto drop; 4805 4806 /* 4807 * If we're suspending, it is possible although not too likely 4808 * that we'd be receiving frames after having already partially 4809 * quiesced the stack. We can't process such frames then since 4810 * that might, for example, cause stations to be added or other 4811 * driver callbacks be invoked. 4812 */ 4813 if (unlikely(local->quiescing || local->suspended)) 4814 goto drop; 4815 4816 /* We might be during a HW reconfig, prevent Rx for the same reason */ 4817 if (unlikely(local->in_reconfig)) 4818 goto drop; 4819 4820 /* 4821 * The same happens when we're not even started, 4822 * but that's worth a warning. 4823 */ 4824 if (WARN_ON(!local->started)) 4825 goto drop; 4826 4827 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 4828 /* 4829 * Validate the rate, unless a PLCP error means that 4830 * we probably can't have a valid rate here anyway. 4831 */ 4832 4833 switch (status->encoding) { 4834 case RX_ENC_HT: 4835 /* 4836 * rate_idx is MCS index, which can be [0-76] 4837 * as documented on: 4838 * 4839 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n 4840 * 4841 * Anything else would be some sort of driver or 4842 * hardware error. The driver should catch hardware 4843 * errors. 
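			 * (802.11n only defines MCS indices 0-76; higher
			 * values are reserved, hence the range check below.)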
4844 */ 4845 if (WARN(status->rate_idx > 76, 4846 "Rate marked as an HT rate but passed " 4847 "status->rate_idx is not " 4848 "an MCS index [0-76]: %d (0x%02x)\n", 4849 status->rate_idx, 4850 status->rate_idx)) 4851 goto drop; 4852 break; 4853 case RX_ENC_VHT: 4854 if (WARN_ONCE(status->rate_idx > 9 || 4855 !status->nss || 4856 status->nss > 8, 4857 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 4858 status->rate_idx, status->nss)) 4859 goto drop; 4860 break; 4861 case RX_ENC_HE: 4862 if (WARN_ONCE(status->rate_idx > 11 || 4863 !status->nss || 4864 status->nss > 8, 4865 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", 4866 status->rate_idx, status->nss)) 4867 goto drop; 4868 break; 4869 default: 4870 WARN_ON_ONCE(1); 4871 fallthrough; 4872 case RX_ENC_LEGACY: 4873 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 4874 goto drop; 4875 rate = &sband->bitrates[status->rate_idx]; 4876 } 4877 } 4878 4879 status->rx_flags = 0; 4880 4881 kcov_remote_start_common(skb_get_kcov_handle(skb)); 4882 4883 /* 4884 * Frames with failed FCS/PLCP checksum are not returned, 4885 * all other frames are returned without radiotap header 4886 * if it was previously present. 4887 * Also, frames with less than 16 bytes are dropped. 4888 */ 4889 if (!(status->flag & RX_FLAG_8023)) 4890 skb = ieee80211_rx_monitor(local, skb, rate); 4891 if (skb) { 4892 ieee80211_tpt_led_trig_rx(local, 4893 ((struct ieee80211_hdr *)skb->data)->frame_control, 4894 skb->len); 4895 4896 if (status->flag & RX_FLAG_8023) 4897 __ieee80211_rx_handle_8023(hw, pubsta, skb, list); 4898 else 4899 __ieee80211_rx_handle_packet(hw, pubsta, skb, list); 4900 } 4901 4902 kcov_remote_stop(); 4903 return; 4904 drop: 4905 kfree_skb(skb); 4906 } 4907 EXPORT_SYMBOL(ieee80211_rx_list); 4908 4909 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 4910 struct sk_buff *skb, struct napi_struct *napi) 4911 { 4912 struct sk_buff *tmp; 4913 LIST_HEAD(list); 4914 4915 4916 /* 4917 * key references and virtual interfaces are protected using RCU 4918 * and this requires that we are in a read-side RCU section during 4919 * receive processing 4920 */ 4921 rcu_read_lock(); 4922 ieee80211_rx_list(hw, pubsta, skb, &list); 4923 rcu_read_unlock(); 4924 4925 if (!napi) { 4926 netif_receive_skb_list(&list); 4927 return; 4928 } 4929 4930 list_for_each_entry_safe(skb, tmp, &list, list) { 4931 skb_list_del_init(skb); 4932 napi_gro_receive(napi, skb); 4933 } 4934 } 4935 EXPORT_SYMBOL(ieee80211_rx_napi); 4936 4937 /* This is a version of the rx handler that can be called from hard irq 4938 * context. Post the skb on the queue and schedule the tasklet */ 4939 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 4940 { 4941 struct ieee80211_local *local = hw_to_local(hw); 4942 4943 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 4944 4945 skb->pkt_type = IEEE80211_RX_MSG; 4946 skb_queue_tail(&local->skb_queue, skb); 4947 tasklet_schedule(&local->tasklet); 4948 } 4949 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 4950
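
/*
 * Illustrative driver-side sketch (not compiled, not part of mac80211):
 * a low-level driver fills in struct ieee80211_rx_status in skb->cb and
 * then hands the frame to one of the entry points above.  The function
 * name, parameters and values below are assumptions made purely for
 * the example.
 */
#if 0
static void example_driver_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
			      int freq_mhz, int signal_dbm, u8 legacy_rate_idx)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->freq = freq_mhz;
	status->band = freq_mhz > 4000 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
	status->signal = signal_dbm;
	status->encoding = RX_ENC_LEGACY;
	status->rate_idx = legacy_rate_idx;

	/* safe to call from hard interrupt context */
	ieee80211_rx_irqsafe(hw, skb);
}
#endif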